diff --git "a/2702.jsonl" "b/2702.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2702.jsonl"
@@ -0,0 +1,655 @@
+{"seq_id":"5684460148","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nori_data = pd.read_csv('covid_data.csv')\n# print(ori_data.info())\nori_data['date'] = pd.to_datetime(ori_data.date)\nori_data['year'] = pd.DatetimeIndex(ori_data.date).year\nori_data['month'] = pd.DatetimeIndex(ori_data.date).month\nori_data['weekday'] = pd.DatetimeIndex(ori_data.date).weekday\nori_data['day'] = pd.DatetimeIndex(ori_data.date).day\nfor_comparison = ori_data.query(\"iso_code==['USA', 'IND', 'BRA', 'RUS', 'GBR']\")\nfinal_data = for_comparison[\n ['date', 'total_cases', 'total_deaths', 'new_deaths', 'new_cases', 'positive_rate', 'female_smokers',\n 'male_smokers', 'month', 'weekday', 'day', 'year', 'location', 'iso_code']]\n\nsns.set_style('darkgrid')\n# plt.hist(x='new_cases', data=final_data, stacked=True)\n# plt.xlabel('Cases')\n# plt.ylabel('Count')\nplt.title('New Cases In India Vs USA')\n#sns.histplot(x='positive_rate', data=final_data.query(\"iso_code==['IND', 'USA']\"), hue='location', log_scale=True,fill=False)\nsns.histplot(final_data, x='month', y='female_smokers', bins=30, discrete=(True, False), log_scale=(False, True),\n cbar=True, cbar_kws=dict(shrink=.75))\nplt.show()\n","repo_name":"Parashar7/Exercise_Matplotlib","sub_path":"exercise_csv_histogram.py","file_name":"exercise_csv_histogram.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9322935392","text":"\"\"\"\nDriver for the Si7021 humidity and temperature sensor.\nBased on the code found at: https://github.com/herm/Si7021\n\"\"\"\n\nfrom time import sleep\nimport smbus\nimport ParameterStorage\n\nclass Si7021:\n\n TEMP_MEASURE_HOLD = 0xE3\n TEMP_MEASURE_NO_HOLD = 0xF3\n\n RH_NO_HOLD = 0xF5\n RH_HOLD = 0xE5\n LAST_TEMPERATURE = 0xE0\n\n READ_HEATER_CTRL = 0x11\n WRITE_HEATER_CTRL = 0x51\n\n READ_USR_REG = 0xE7\n WRITE_USR_REG = 0xE6\n\n RESET = 0xFE\n\n HEATER_OFFSET = 3.09\n HEATER_STEP = 6.074\n\n USR_RES1 = 128\n USR_VDDS = 64\n USR_HTRE = 4\n USR_RES0 = 1\n\n lastTemperatureMeasurement = -100\n lastHumidityMeasurement = -100\n\n TEMPERATURE_PARAM_NAME = \"Temperatura\"\n RELATIVE_HUMIDITY_PARAM_NAME = \"Wilgotność\"\n\n def __init__(self):\n self.bus = smbus.SMBus(1)\n self.addr = 0x40\n ParameterStorage.addParameter(self.TEMPERATURE_PARAM_NAME,\"°C\")\n ParameterStorage.addParameter(self.RELATIVE_HUMIDITY_PARAM_NAME,\"%\")\n\n def persistentBusRead(self, registerAddr, loops=50):\n \"\"\" Tries to read from the I2C bus as many times as defined - tries again if an exception is raised \"\"\"\n loopCount = 0\n retVal = 0\n tryAgain = True\n while loopCount<=loops and tryAgain:\n try:\n retVal = self.bus.read_word_data(self.addr, registerAddr)\n tryAgain = False\n except:\n loopCount+=1\n sleep(0.5)\n if tryAgain:\n print(\"Read failed\")\n return retVal\n\n def persistentBusWrite(self, registerAddr, value=None, loops=20):\n \"\"\" Tries to write to the I2C bus as many times as defined - tries again if an exception is raised \"\"\"\n loopCount = 0\n tryAgain = True\n while loopCount<=loops and tryAgain:\n try:\n if value is None:\n # command-only write, no data byte\n self.bus.write_byte(self.addr, registerAddr)\n else:\n # register write: write_byte_data takes (addr, register, value)\n self.bus.write_byte_data(self.addr, registerAddr, value)\n tryAgain = False\n except:\n loopCount+=1\n sleep(0.5)\n if tryAgain:\n print(\"Write failed\")\n\n def reset(self):\n \"\"\" Reset the sensor \"\"\"\n 
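# Editor's note (illustrative, not from the original driver): the Si7021
# needs a short moment to come back up after a soft reset (the datasheet
# quotes a power-up time on the order of 15 ms), so a hypothetical caller
# would pause briefly before the next measurement, e.g.:
#
#     sensor = Si7021()
#     sensor.reset()
#     sleep(0.02)  # give the device time to power back up
#     print(sensor.read())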
self.persistentBusWrite(self.RESET)\n\n def swapBytes(self, word):\n \"\"\" Swaps the two bytes of a word \"\"\"\n retVal = ((word & 0xff) << 8) | (word >> 8)\n return retVal\n\n def readTemp(self, lastTemp=False):\n \"\"\" Reads the temperature from the sensor in degrees Celsius - if lastTemp is set to True reads it from the buffer without measurement \"\"\"\n t = self.swapBytes(self.persistentBusRead(self.TEMP_MEASURE_HOLD if not lastTemp else self.LAST_TEMPERATURE))\n t = 175.72 * t / 65536. - 46.85 # recalculated like in DS to degrees Celsius\n ParameterStorage.provideValue(self.TEMPERATURE_PARAM_NAME,t)\n self.lastTemperatureMeasurement=t\n return t\n\n def readHumidity(self):\n \"\"\" Reads the relative humidity from the sensor \"\"\"\n rh = self.swapBytes(self.persistentBusRead(self.RH_HOLD))\n rh = 125. * rh / 65536. - 6 #recalculated like in DS\n rh = max(0, min(100, rh)) #in edge cases results can be over the limit - this is a fix - see DS\n ParameterStorage.provideValue(self.RELATIVE_HUMIDITY_PARAM_NAME,rh)\n self.lastHumidityMeasurement=rh\n return rh\n\n def read(self):\n \"\"\" Read relative humidity and temperature.\n\n Returns a tuple (rh, temperature)\n \"\"\"\n rh = self.readHumidity()\n t = self.readTemp(True)\n ParameterStorage.provideValue(self.TEMPERATURE_PARAM_NAME,t)\n ParameterStorage.provideValue(self.RELATIVE_HUMIDITY_PARAM_NAME,rh)\n return (rh, t)\n\n @property\n def heater_mA(self):\n \"\"\" Get heater current in mA \"\"\"\n usr = self.persistentBusRead(self.READ_USR_REG)\n if usr & self.USR_HTRE:\n value = self.persistentBusRead(self.READ_HEATER_CTRL)\n value = value * self.HEATER_STEP + self.HEATER_OFFSET\n return value\n return 0\n\n @heater_mA.setter\n def heater_mA(self, value):\n \"\"\" Set heater current in mA.\n\n Turning the heater on and off is handled automatically.\n \"\"\"\n usr = self.persistentBusRead(self.READ_USR_REG)\n if not value:\n usr &= ~self.USR_HTRE\n else:\n # Enable heater and calculate settings\n setting = 0\n if value > self.HEATER_OFFSET:\n value -= self.HEATER_OFFSET\n setting = int(round(value / self.HEATER_STEP)) # See DS 5.5\n setting = min(15, setting) #Avoid overflow\n self.persistentBusWrite(self.WRITE_HEATER_CTRL, setting)\n usr |= self.USR_HTRE\n self.persistentBusWrite(self.WRITE_USR_REG,usr)\n\n def set_resolution(self, bits_rh):\n \"\"\" Select measurement resolution.\n\n bits_rh is the number of bits for the RH measurement. The number of\n bits for temperature is chosen according to the table in section 6.1\n of the datasheet.\n \"\"\"\n usr = self.persistentBusRead(self.READ_USR_REG)\n usr &= ~(self.USR_RES0 | self.USR_RES1)\n if bits_rh == 8:\n usr |= self.USR_RES0 # RES1:RES0 = 01 -> 8 bit RH\n elif bits_rh == 10:\n usr |= self.USR_RES1 # RES1:RES0 = 10 -> 10 bit RH\n elif bits_rh == 11:\n usr |= self.USR_RES0 | self.USR_RES1\n elif bits_rh != 12:\n raise ValueError(\"Unsupported number of bits.\")\n self.persistentBusWrite(self.WRITE_USR_REG,usr)\n\n\n # Reading the device ID seems to be impossible with the smbus functions\n # as they do not support 2 byte register addresses.
And the Si7021 does\n # not accept the address in two transactions\n","repo_name":"dziq1981/WindowsController","sub_path":"TemperatureSensor.py","file_name":"TemperatureSensor.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34448517174","text":"from flask import Flask, request\nimport os\nimport subprocess\n\napp = Flask(__name__)\n\nsubprocesses = {}\n\n@app.route('/start', methods=['POST'])\ndef start():\n global subprocesses\n token = request.form.get('token')\n if token in subprocesses:\n return \"Recording already in progress\"\n else:\n subprocesses[token] = subprocess.Popen(['python', 'record.py', token])\n return \"Recording started\"\n\n@app.route('/stop', methods=['POST'])\ndef stop():\n global subprocesses\n token = request.form.get('token')\n if token in subprocesses:\n subprocesses[token].send_signal(subprocess.signal.SIGINT)\n del subprocesses[token]\n return \"Recording stopped\"\n else:\n return \"No recording in progress\"\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9999)\n\n","repo_name":"umdoyuun/DIENG_Project","sub_path":"rasberryPi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24012177971","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom . import models, forms\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\n\n# 注册的视图函数\ndef register(request):\n if request.method == \"POST\":\n print(request.POST)\n print(\"=\" * 120)\n ret = {\"status\": 0, \"msg\": \"\"}\n form_obj = forms.RegForm(request.POST)\n print(request.POST)\n # 帮我做校验\n if form_obj.is_valid():\n\n # 校验通过,去数据库创建一个新的用户\n form_obj.cleaned_data.pop(\"re_password\")\n avatar_img = request.FILES.get(\"avatar\")\n models.UserInfo.objects.create_user(**form_obj.cleaned_data, avatar=avatar_img)\n ret[\"msg\"] = \"/index/\"\n return JsonResponse(ret)\n else:\n print(form_obj.errors)\n ret[\"status\"] = 1\n ret[\"msg\"] = form_obj.errors\n print(ret)\n print(\"=\" * 120)\n return JsonResponse(ret)\n # 生成一个form对象\n form_obj = forms.RegForm()\n print(form_obj.fields)\n return render(request, \"register.html\", {\"form_obj\": form_obj})\n # return render(request, \"form_test.html\", {\"form_obj\": form_obj})\n\n\n# 校验用户名是否已被注册\ndef check_username_exist(request):\n ret = {\"status\": 0, \"msg\": \"\"}\n username = request.GET.get(\"username\")\n print(username)\n is_exist = models.UserInfo.objects.filter(username=username)\n if is_exist:\n ret[\"status\"] = 1\n ret[\"msg\"] = \"用户名已被注册!\"\n return JsonResponse(ret)\n","repo_name":"LeeXyan/lxgzhw006","sub_path":"hw_001_Django/hw_006_register/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9326416343","text":"import argparse\nimport os\nimport pdb\nimport re\nimport socket\nimport sys\nimport timeit\n\nimport numpy as np\nimport scipy.io as sio\nimport tensorflow as tf\nfrom tensorflow.python.keras.callbacks import (Callback, CSVLogger,\n ModelCheckpoint)\nfrom tensorflow.python.keras.datasets import mnist\nfrom tensorflow.python.keras.layers import (BatchNormalization, Conv2D, Dense,\n Dropout, Flatten, Input, Lambda,\n MaxPooling2D, Reshape,\n UpSampling2D)\nfrom tensorflow.python.keras.models import 
Model\nfrom coupling_functions import fullcov, minvar, mse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--batch_size\", default=150, type=int, help=\"Batch size\")\nparser.add_argument(\"--p_drop\", default=0.4, type=float, help=\"Dropout rate\")\nparser.add_argument(\"--latent_dim\", default=2, type=int, help=\"Number of latent dims\")\n\nparser.add_argument(\"--cpl_fn\", default=\"minvar\",type=str, help=\"mse, mseBN, fullcov or minvar\")\nparser.add_argument(\"--cpl_str\", default=1e-3, type=float, help=\"coupling strength\")\n\nparser.add_argument(\"--n_epoch\", default=500, type=int, help=\"n(training epochs)\")\nparser.add_argument(\"--exp_name\", default='MNIST', type=str, help=\"Folder name to store results\")\nparser.add_argument(\"--model_id\", default='cnn', type=str, help=\"Model id part of result filenames\")\nparser.add_argument(\"--run_iter\", default=0, type=int, help=\"Run-specific id\")\n\n\ndef main(batch_size=150, p_drop=0.4, latent_dim=2,\n cpl_fn='minvar', cpl_str=1e-3,\n n_epoch=500, run_iter=0, model_id='cnn',exp_name='MNIST'):\n \n \n fileid = model_id + \\\n '_cf_' + cpl_fn + \\\n '_cs_' + str(cpl_str) + \\\n '_pd_' + str(p_drop) + \\\n '_bs_' + str(batch_size) + \\\n '_ld_' + str(latent_dim) + \\\n '_ne_' + str(n_epoch) + \\\n '_ri_' + str(run_iter)\n\n fileid = fileid.replace('.', '-')\n train_dat, train_lbl, val_dat, val_lbl, dir_pth = dataIO(exp_name=exp_name)\n \n #Architecture parameters ------------------------------\n input_dim = train_dat.shape[1]\n n_arms = 2\n fc_dim = 49\n \n #Model definition -------------------------------------\n M = {}\n M['in_ae'] = Input(shape=(28,28,1), name='in_ae')\n for i in range(n_arms):\n M['co1_ae_'+str(i)] = Conv2D(10, (3, 3), activation='relu', padding='same',name='co1_ae_'+str(i))(M['in_ae'])\n M['mp1_ae_'+str(i)] = MaxPooling2D((2, 2), padding='same',name='mp1_ae_'+str(i))(M['co1_ae_'+str(i)])\n M['dr1_ae_'+str(i)] = Dropout(rate=p_drop, name='dr1_ae_'+str(i))(M['mp1_ae_'+str(i)])\n M['fl1_ae_'+str(i)] = Flatten(name='fl1_ae_'+str(i))(M['dr1_ae_'+str(i)])\n M['fc01_ae_'+str(i)] = Dense(fc_dim, activation='relu', name='fc01_ae_'+str(i))(M['fl1_ae_'+str(i)])\n M['fc02_ae_'+str(i)] = Dense(fc_dim, activation='relu', name='fc02_ae_'+str(i))(M['fc01_ae_'+str(i)])\n M['fc03_ae_'+str(i)] = Dense(fc_dim, activation='relu', name='fc03_ae_'+str(i))(M['fc02_ae_'+str(i)])\n\n if cpl_fn in ['mse']:\n M['ld_ae_'+str(i)] = Dense(latent_dim, activation='linear', name='ld_ae_'+str(i))(M['fc03_ae_'+str(i)])\n elif cpl_fn in ['mseBN', 'fullcov', 'minvar']:\n M['fc04_ae_'+str(i)] = Dense(latent_dim, activation='linear', name='fc04_ae_'+str(i))(M['fc03_ae_'+str(i)])\n M['ld_ae_'+str(i)] = BatchNormalization(scale=False, center=False, epsilon=1e-10, momentum=0.99, name='ld_ae_'+str(i))(M['fc04_ae_'+str(i)])\n\n M['fc05_ae_'+str(i)] = Dense(fc_dim, activation='relu', name='fc05_ae_'+str(i))(M['ld_ae_'+str(i)])\n M['fc06_ae_'+str(i)] = Dense(fc_dim, activation='relu', name='fc06_ae_'+str(i))(M['fc05_ae_'+str(i)])\n M['fc07_ae_'+str(i)] = Dense(fc_dim*4, activation='relu', name='fc07_ae_'+str(i))(M['fc06_ae_'+str(i)])\n M['re1_ae_'+str(i)] = Reshape((14, 14, 1), name='re1_ae_'+str(i))(M['fc07_ae_'+str(i)])\n M['us1_ae_'+str(i)] = UpSampling2D((2, 2),name = 'us1_ae_'+str(i))(M['re1_ae_'+str(i)])\n M['co2_ae_'+str(i)] = Conv2D(10, (3, 3), activation='relu', padding='same',name='co2_ae_'+str(i))(M['us1_ae_'+str(i)])\n M['ou_ae_'+str(i)] = Conv2D(1, (3, 3), activation='sigmoid', 
padding='same',name='ou_ae_'+str(i))(M['co2_ae_'+str(i)])\n\n cplAE = Model(inputs=M['in_ae'],\n outputs=[M['ou_ae_'+str(i)] for i in range(n_arms)] + [M['ld_ae_'+str(i)] for i in range(n_arms)])\n \n if cpl_fn in ['mse','mseBN']:\n cpl_fn_loss = mse\n elif cpl_fn == 'fullcov':\n cpl_fn_loss = fullcov\n elif cpl_fn == 'minvar':\n cpl_fn_loss = minvar\n\n assert type(cpl_fn)\n #Create loss dictionary\n loss_dict = {'ou_ae_0': mse(M['in_ae'],M['ou_ae_0']), \n 'ou_ae_1': mse(M['in_ae'],M['ou_ae_1']),\n 'ld_ae_0': cpl_fn_loss(M['ld_ae_0'], M['ld_ae_1']),\n 'ld_ae_1': cpl_fn_loss(M['ld_ae_1'], M['ld_ae_0'])}\n\n \n #Loss weights dictionary\n loss_wt_dict = {'ou_ae_0': 1.0, 'ou_ae_1': 1.0,\n 'ld_ae_0': cpl_str, \n 'ld_ae_1': cpl_str}\n\n #Add loss definitions to the model\n cplAE.compile(optimizer='adam', loss=loss_dict, loss_weights=loss_wt_dict)\n \n #Data feed\n train_input_dict = {'in_ae': train_dat}\n val_input_dict = {'in_ae': val_dat}\n train_output_dict = {'ou_ae_0': train_dat, \n 'ou_ae_1': train_dat, \n 'ld_ae_0': np.empty((train_dat.shape[0], latent_dim)), \n 'ld_ae_1': np.empty((train_dat.shape[0], latent_dim))}\n val_output_dict = {'ou_ae_0': val_dat, \n 'ou_ae_1': val_dat, \n 'ld_ae_0': np.empty((val_dat.shape[0], latent_dim)), \n 'ld_ae_1': np.empty((val_dat.shape[0], latent_dim))}\n \n log_cb = CSVLogger(filename=dir_pth['logs']+fileid+'.csv')\n \n #Train model\n cplAE.fit(train_input_dict, train_output_dict,\n validation_data=(val_input_dict, val_output_dict),\n batch_size=batch_size, initial_epoch=0, epochs=n_epoch,\n verbose=2, shuffle=True,\n callbacks = [log_cb])\n \n #Saving weights\n cplAE.save_weights(dir_pth['result']+fileid+'-modelweights'+'.h5')\n\n matsummary = {}\n #Trained model prediction\n for i in range(n_arms):\n encoder = Model(inputs=M['in_ae'], outputs=M['ld_ae_'+str(i)])\n matsummary['z_val_'+str(i)] = encoder.predict({'in_ae': val_dat})\n matsummary['z_train_'+str(i)] = encoder.predict({'in_ae': train_dat})\n matsummary['train_lbl']=train_lbl\n matsummary['val_lbl']=val_lbl\n sio.savemat(dir_pth['result']+fileid+'-summary.mat', matsummary)\n return\n\ndef dataIO(exp_name='MNIST'):\n from pathlib import Path\n\n dir_pth = {}\n curr_path = str(Path().absolute()) + '/'\n dir_pth['data'] = curr_path + 'data/raw/'\n dir_pth['result'] = curr_path + 'data/results/' + exp_name + '/'\n dir_pth['logs'] = dir_pth['result'] + 'logs/'\n Path(dir_pth['logs']).mkdir(parents=True, exist_ok=True) \n\n (train_dat, train_lbl), (val_dat, val_lbl) = mnist.load_data()\n \n train_dat = np.reshape(train_dat, (len(train_dat), 28, 28, 1))\n val_dat = np.reshape(val_dat, (len(val_dat), 28, 28, 1))\n train_dat = train_dat.astype('float32') / 255.\n val_dat = val_dat.astype('float32') / 255.\n return train_dat, train_lbl, val_dat, val_lbl, dir_pth\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(**vars(args))\n","repo_name":"AllenInstitute/coupledAE","sub_path":"MNIST_loss_fn.py","file_name":"MNIST_loss_fn.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"24685425951","text":"#!/usr/bin/env python3\nimport socket\nimport re\nimport argparse\nfrom urllib.parse import urlparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--url\", dest='target_host', help=\"Host target\")\nparser.add_argument(\"--download\", dest='download', help=\"file path to download\")\nargs = parser.parse_args()\n\nhost = args.target_host\ndownload_file = args.download\n\ndef 
recv_all(the_socket):\n total_data=[]\n data = the_socket.recv(8192)\n while (len(data) > 0):\n total_data.append(data)\n data = the_socket.recv(8192)\n data = b''.join(total_data)\n return data\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ndomain_name = urlparse(f'{host}').netloc\ns.connect((domain_name,80))\nheader = ( f'GET {download_file} HTTP/1.1\r\n'\n f'Host: {domain_name}\r\n'\n f'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36\r\n'\n f'Accept: */*\r\n'\n f'Accept-Language: vi-VN,vi;q=0.9,fr-FR;q=0.8,fr;q=0.7,en-US;q=0.6,en;q=0.5\r\n'\n f'Accept-Encoding: gzip, deflate\r\n\r\n'\n).encode()\n\ns.send(header)\n\nresponse = recv_all(s)\nif b\"HTTP/1.1 200 OK\" in response:\n length_file = re.findall(b\"Content-Length: ([0-9]+)\\r\\n\", response)[0].decode()\n print(\"Kích thước file ảnh: \" + length_file + \" bytes\")\n\n response_header = response.split(b'\r\n\r\n')[0]\n image_content = response[len(response_header)+4:]\n file_name = download_file.split(\"/\")[-1]\n f = open(f\"./file_upload/{file_name}\", \"wb\")\n f.write(image_content)\n f.close()\nelse:\n print(\"Không tồn tại file ảnh.\")\n exit()\n","repo_name":"mkeyys/Challenge04","sub_path":"httpdownload.py","file_name":"httpdownload.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43109765797","text":"import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt # needed by plot_audio_features below\n\nimport datetime as dt\n\n\nimport spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\n### Spotify Credentials - must be set in local environment to run\nauth_manager = SpotifyClientCredentials()\nsp = spotipy.Spotify(auth_manager=auth_manager)\n\n\ndef get_tracks(genre):\n limit = 50\n max_requests = 2000\n ### TESTING VALUES\n # limit = 5# 50\n # max_requests = 20 ## 2000\n\n track_df = []\n for n in range(0, max_requests, limit):\n search_results = sp.search(q=f'genre: \"{genre}\"', type='track', limit=limit, offset = n, market='US')['tracks']['items']\n\n track_list = []\n \n for i in range(len(search_results)):\n track_info = [\n search_results[i].get('name'), \n search_results[i].get('artists')[0]['name'], \n search_results[i].get('album')['name'],\n search_results[i].get('id'),\n search_results[i].get('popularity'),\n ]\n track_list.append(track_info)\n\n ## create dataframe of track info\n track_list_df = pd.DataFrame(track_list, columns=['track_name', 'artist', 'album', 'track_id', 'popularity'])\n ## get audio features for tracks\n track_audio_features = pd.DataFrame.from_dict(sp.audio_features(tracks=track_list_df['track_id'].values.tolist()))\n drop_cols = ['type', 'id', 'uri', 'track_href', 'analysis_url']\n track_audio_features.drop(columns = drop_cols, inplace=True)\n ## concat both dataframes\n track_list_df = pd.concat([track_list_df, track_audio_features], axis=1)\n track_df.append(track_list_df)\n\n tracks = pd.concat(track_df, ignore_index=True)\n tracks['popularity'] = np.round(tracks['popularity']/100, 2)\n # tracks.to_csv(f'../data/{genre}-{tstamp}.csv', index=False)\n # genre = genre.replace(' ', '')\n \n ## don't actually need to save a csv here!\n # tracks.to_csv(f'./data/{genre}.csv', index=False)\n print('We got ', tracks.shape[0], 'tracks from the ', genre, 'genre.')\n return tracks\n\n\n\ndef make_track_URIs(track_ids):\n ### reformats track ids as 
track URIs\n ### need text spotify:track: in front of each ID to use in Spotify\n track_URIs = []\n for track_id in track_ids:\n uri = 'spotify:track:'+ track_id\n track_URIs.append(uri)\n return track_URIs\n\ndef create_playlist_file(track_ids, og_track_id, name):\n \n ### creates text file of Spotify URIs\n # track_list = og_track_id.values.tolist() + track_ids.values.tolist()\n track_list = list(og_track_id)+ track_ids\n track_URIs = make_track_URIs(track_list)\n ### write URIs to text file\n playlist = open(fr'./playlist_{name}.txt','w')\n playlist.writelines('%s\\n' % track for track in track_URIs) \n playlist.close()\n pass\n\ndef display_playlist(playlist_tracks):\n ### displays playlist track name, artist, album\n tracks_dict = sp.tracks(playlist_tracks)['tracks']\n playlist_info = []\n for i in range(len(playlist_tracks)):\n track = [\n tracks_dict[i]['name'], \n tracks_dict[i]['artists'][0]['name'],\n tracks_dict[i]['album']['name']\n ]\n playlist_info.append(track)\n \n playlist_df = pd.DataFrame(playlist_info, columns=['Title', 'Artist', 'Album'] )\n ### start index at 1\n playlist_df.index = np.arange(1,len(playlist_df)+1)\n return playlist_df\n\n\ncol_names_dict = {\n 'track_name': 'Track Name', \n 'track_number': 'Track', \n 'track_uri' : 'URI', \n 'popularity': 'Popularity', \n 'danceability': 'Danceability',\n 'energy' : 'Energy', \n 'key': 'Key', \n 'loudness': 'Loudness', \n 'mode': 'Mode', \n 'speechiness': 'Speechiness', \n 'acousticness': 'Acousticness',\n 'instrumentalness': 'Instrumentalness', \n 'liveness': 'Liveness', \n 'valence': 'Valence', \n 'tempo': 'Tempo', \n 'duration_ms': 'Duration',\n 'time_signature' : 'Time Signature'\n }\n\n###---------------------\ndef album_audio_features(ID):\n album_tracks_list = sp.album_tracks(ID, market='US')['items']\n \n album_tracks_URI = [album_tracks_list[i].get('uri') for i in range(len(album_tracks_list))]\n\n track_list = []\n \n raw_track_list = sp.tracks(album_tracks_URI, market='US')['tracks']\n \n track_list = []\n \n for i in range(len(raw_track_list)):\n track_info = [raw_track_list[i].get('name'), raw_track_list[i].get('track_number'), raw_track_list[i].get('uri'), raw_track_list[i].get('popularity')]\n track_list.append(track_info)\n\n track_info_df = pd.DataFrame(track_list, columns=['track_name', 'track_number', 'track_uri', 'popularity'])\n\n track_audio_features = sp.audio_features(tracks=track_info_df['track_uri'].values.tolist())\n\n audio_features_df = pd.DataFrame.from_dict(track_audio_features)\n\n drop_cols = ['type', 'id', 'uri', 'track_href', 'analysis_url']\n\n audio_features_df.drop(columns = drop_cols, inplace=True)\n\n album_df = pd.concat([track_info_df, audio_features_df], axis=1)\n \n return album_df\n\ndef get_album_info(ID):\n album = sp.album(ID)\n album_info = {\n 'name' : album['name'],\n 'artist' : album['artists'][0]['name'],\n 'artwork_url': album['images'][1]['url'], ## 300 W 64 H\n 'popularity' : album['popularity'],\n 'release_date': dt.datetime.strptime(album['release_date'],'%Y-%m-%d').strftime('%B %d, %Y'),\n }\n return album_info\n\ndef get_track_info(uri):\n track = sp.track(uri)\n track_info = {\n 'track_name' : track['name'],\n 'artist' : track['artists'][0]['name'],\n 'album' : track['album']['name'],\n 'artwork_url': track['album']['images'][1]['url'], ## 300 W 64 H\n 'release_date': dt.datetime.strptime(track['album']['release_date'],'%Y-%m-%d').strftime('%B %d, %Y'),\n 'track_id' : track['id'],\n 'popularity' : np.round(track['popularity']/100, 2),\n }\n # track_df = 
pd.DataFrame.from_dict(track_info)\n return track_info\n\ndef get_track_audio_features(track_info):\n ### Note: fcn can use ID, URI or URL from Spotify\n ### GET TRACK INFO FROM SPOTIFY\n \n track_info_list = [\n track_info['track_name'],\n track_info['artist'],\n track_info['album'],\n track_info['track_id'],\n track_info['popularity']\n ]\n \n ### GET TRACK AUDIO FEATURES FROM SPOTIFY\n track_audio_features = sp.audio_features(tracks=track_info['track_id'])\n \n audio_features_df = pd.DataFrame.from_dict(track_audio_features)\n drop_cols = ['type', 'id', 'uri', 'track_href', 'analysis_url']\n audio_features_df.drop(columns = drop_cols, inplace=True)\n\n ### create dataframe\n track_info_df = pd.DataFrame([track_info_list], columns=['track_name','artist', 'album', 'track_id', 'popularity'])\n \n track_audio_features = pd.concat([track_info_df, audio_features_df], axis=1)\n \n return track_audio_features\n\ndef get_track_data(uri):\n\n track_info = get_track_info(uri)\n\n track_df = get_track_audio_features(track_info)\n\n return track_df\n\n\ndef plot_audio_features(df, artist_name, album_name):\n df = df.iloc[::-1]\n ### Horizontal subplots\n df.plot.barh(\n x = 'track_name',\n y = ['valence','energy', 'danceability'],\n ylim = [0,1], \n sharey = True,\n subplots = True, \n layout = (1,3),\n figsize = (15,5),\n legend = False, \n # title = f'{artist_name} - {album_name}', \n xlabel = 'Track Name')\n plt.savefig('./images/album_audio_features.png')\n pass\n\ndef convert_duration(time_ms):\n secs = int((time_ms/1000)%60)\n if secs < 10:\n secs = str('0') + str(secs)\n mins = int((time_ms/(1000*60))%60)\n mins_secs = str(mins) + ':' + str(secs)\n return mins_secs\n\ndef format_display_track(track):\n ### display track features\n track['Time'] = track['duration_ms'].apply(convert_duration)\n track_feat = track[['Time','popularity', 'danceability', 'energy', 'valence', 'speechiness', 'instrumentalness','acousticness', 'liveness', 'loudness', 'tempo']]\n track_feat = track_feat.T\n track_feat.columns = ['Feature']\n\n return track_feat\n\ndef format_display_tracks(df_album):\n ### display track list\n df_track_list = df_album[['track_number', 'track_name']]\n df_track_list['Time'] = df_album['duration_ms'].apply(convert_duration)\n df_track_list.set_index('track_number', inplace=True)\n df_track_list.rename(columns=col_names_dict, inplace=True, errors='ignore')\n return df_track_list\n\ndef format_display_album_data(df_album):\n display_cols = [\n 'track_number', \n 'track_name', \n 'popularity', \n 'danceability',\n 'energy', \n 'valence',\n 'speechiness', \n 'acousticness',\n 'instrumentalness']\n\n df_album_display = df_album[display_cols]\n df_album_display.set_index('track_number', inplace=True)\n\n df_album_display.rename(columns=col_names_dict, inplace=True, errors='ignore')\n\n return df_album_display","repo_name":"rdepiero218/audio-feature-recommender-project","sub_path":"scripts/spotify_fcns.py","file_name":"spotify_fcns.py","file_ext":"py","file_size_in_byte":9212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8378820718","text":"import _plotly_utils.basevalidators\n\n\nclass ScattercarpetValidator(_plotly_utils.basevalidators.CompoundArrayValidator):\n def __init__(\n self, plotly_name=\"scattercarpet\", parent_name=\"layout.template.data\", **kwargs\n ):\n super(ScattercarpetValidator, self).__init__(\n plotly_name=plotly_name,\n parent_name=parent_name,\n data_class_str=kwargs.pop(\"data_class_str\", 
\"Scattercarpet\"),\n data_docs=kwargs.pop(\n \"data_docs\",\n \"\"\"\n\"\"\",\n ),\n **kwargs,\n )\n","repo_name":"plotly/plotly.py","sub_path":"packages/python/plotly/plotly/validators/layout/template/data/_scattercarpet.py","file_name":"_scattercarpet.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":14438,"dataset":"github-code","pt":"3"}
+{"seq_id":"24012177971","text":"#!/usr/bin/python3\n\"\"\"\nDivide every element of a matrix by a divisor\n\"\"\"\n\n\ndef matrix_divided(matrix, div):\n \"\"\"\n Function that returns the division of every element of matrix by div\n Args:\n matrix: the matrix parameter\n div: divider (number)\n \"\"\"\n\n if not isinstance(div, (int, float)):\n raise TypeError('div must be a number')\n if div == 0:\n raise ZeroDivisionError('division by zero')\n\n curr = []\n\n for i in range(len(matrix)):\n if len(matrix[0]) != len(matrix[i]):\n raise TypeError(\n 'Each row of the matrix must have the same size')\n\n now = []\n\n for j in range(len(matrix[i])):\n if not isinstance(matrix[i][j], (int, float)):\n raise TypeError('matrix must be a matrix of numbers')\n\n now.append(round(matrix[i][j] / div, 2))\n\n curr.append(now)\n\n return curr\n","repo_name":"Selomon1/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"22467407975","text":"from estrato import Estrato\nimport numpy as np \n\nnum_estratos=int(input(\"ingrese el numero de estratos: \"))\nif __name__ == \"__main__\":\n\n esf_efec=[]\n esf_tot=[]\n list_ka=[]\n htot_list=[]\n f_list=[]\n htot=0\n count=0\n ftot=0\n zres=0\n\n for i in range(num_estratos):\n est=Estrato()\n est.EnterData()\n ka=np.tan(np.deg2rad(45-(est.af)/2))**2\n \n if len(esf_tot) != 0:\n se=est.pe*est.H + esf_efec[(count-1)]\n else:\n se=est.pe*est.H\n\n sa=ka*se - 2*(est.C)*(ka)**0.5\n su=est.pa*est.H\n st=sa+su\n esf_efec.append(se)\n esf_tot.append(st)\n list_ka.append(ka)\n htot=htot+est.H\n htot_list.append(htot)\n\n if len(esf_tot) > 1:\n sa0=(ka*esf_efec[(count-1)] - 2*(est.C)*(ka)**0.5)\n f1=sa0*est.H \n f2=1/2*(st-sa0)*est.H\n f=f1+f2\n ftot=ftot+f\n zres=zres+f1*(htot-(est.H)/2)+f2*(htot-(est.H)+2/3*(est.H))\n else:\n ftot=1/2*st*est.H\n zres=ftot*(est.H)*(2/3)\n\n count += 1 # advance to the next stratum\n\n print(esf_efec,esf_tot,(zres/ftot))\n","repo_name":"diegogonzalezc/muros_contencion","sub_path":"f_eq_rankine_plano.py","file_name":"f_eq_rankine_plano.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10045924001","text":"import copy\nimport math\n\nimport numpy as np\n\nfrom gym.envs.registration import register\nfrom gym.envs.toy_text.frozen_lake import MAPS\nfrom piecewise.dtype import DataSpaceBuilder, Dimension\n\nfrom ..environment import EnvironmentResponse, IEnvironment\nfrom .gym_environment import GymEnvironment\n\nMAPS[\"8x8-train\"] = \\\n [\"SSSSSSSS\",\n \"SSSSSSSS\",\n \"SSSHSSSS\",\n \"SSSSSHSS\",\n \"SSSHSSSS\",\n \"SHHSSSHS\",\n \"SHSSHSHS\",\n \"SSSHSSSG\"] \n\nMAPS[\"12x12-train\"] = \\\n [\"SSSSSSHSSSHS\",\n \"SSSSSSSSSSSS\",\n \"SSHSSSHSSHSS\",\n \"SSSSHSSSSSSH\",\n \"HHSSSHSHSSSH\",\n \"SHSSHSSSSHSS\",\n \"SSSSSSSSHSSS\",\n \"SHSSHHSSHSSH\",\n \"HSSSSSSHSSSH\",\n \"SSSSSSSSSHSS\",\n \"SSHSSSHSSSSS\",\n 
\"HSHSSSSSHSSG\"]\n\nMAPS[\"12x12-test\"] = \\\n [\"SFFFFFHFFFHF\",\n \"FFFFFFFFFFFF\",\n \"FFHFFFHFFHFF\",\n \"FFFFHFFFFFFH\",\n \"HHFFFHFHFFFH\",\n \"FHFFHFFFFHFF\",\n \"FFFFFFFFHFFF\",\n \"FHFFHHFFHFFH\",\n \"HFFFFFFHFFFH\",\n \"FFFFFFFFFHFF\",\n \"FFHFFFHFFFFF\",\n \"HFHFFFFFHFFG\"]\n\nregister(id=\"FrozenLake8x8-train-v0\",\n entry_point=\"gym.envs.toy_text:FrozenLakeEnv\",\n kwargs={\"map_name\": \"8x8-train\"})\n\nregister(id=\"FrozenLake12x12-train-v0\",\n entry_point=\"gym.envs.toy_text:FrozenLakeEnv\",\n kwargs={\"map_name\": \"12x12-train\"})\n\nregister(id=\"FrozenLake12x12-test-v0\",\n entry_point=\"gym.envs.toy_text:FrozenLakeEnv\",\n kwargs={\"map_name\": \"12x12-test\"})\n\n\ndef make_frozen_lake_8x8_train_env(slip_prob=0.0, seed=0):\n is_slippery = slip_prob > 0.0\n gym_env = GymEnvironment(env_name=\"FrozenLake8x8-train-v0\",\n env_kwargs={\"is_slippery\": is_slippery},\n custom_obs_space=None,\n custom_action_set=None,\n seed=seed)\n return FrozenLakeGymEnvironment(gym_env, grid_size=8, slip_prob=slip_prob)\n\n\ndef make_frozen_lake_8x8_test_env(slip_prob=0.0, seed=0):\n is_slippery = slip_prob > 0.0\n gym_env = GymEnvironment(env_name=\"FrozenLake8x8-v0\",\n env_kwargs={\"is_slippery\": is_slippery},\n custom_obs_space=None,\n custom_action_set=None,\n seed=seed)\n return FrozenLakeGymEnvironment(gym_env, grid_size=8, slip_prob=slip_prob)\n\ndef make_frozen_lake_12x12_train_env(slip_prob=0.0, seed=0):\n is_slippery = slip_prob > 0.0\n gym_env = GymEnvironment(env_name=\"FrozenLake12x12-train-v0\",\n env_kwargs={\"is_slippery\": is_slippery},\n custom_obs_space=None,\n custom_action_set=None,\n seed=seed)\n return FrozenLakeGymEnvironment(gym_env, grid_size=12, slip_prob=slip_prob)\n\n\ndef make_frozen_lake_12x12_test_env(slip_prob=0.0, seed=0):\n is_slippery = slip_prob > 0.0\n gym_env = GymEnvironment(env_name=\"FrozenLake12x12-test-v0\",\n env_kwargs={\"is_slippery\": is_slippery},\n custom_obs_space=None,\n custom_action_set=None,\n seed=seed)\n return FrozenLakeGymEnvironment(gym_env, grid_size=12, slip_prob=slip_prob)\n\n\nclass FrozenLakeGymEnvironment(IEnvironment):\n \"\"\"Decorator over GymEnvironment containing frozen lake to change the\n observations and observation space to be an (x, y) grid instead of simple\n numbered array of cells.\"\"\"\n def __init__(self, gym_env, grid_size, slip_prob):\n assert isinstance(gym_env, GymEnvironment)\n self._raw_env = gym_env\n self._grid_size = grid_size\n self._x_y_coordinates_obs_space = \\\n self._gen_x_y_coordinates_obs_space(self._grid_size)\n self._slip_prob = slip_prob\n self._alter_transition_func_if_needed(self._slip_prob)\n\n def _gen_x_y_coordinates_obs_space(self, grid_size):\n obs_space_builder = DataSpaceBuilder()\n for _ in range(2): # x, y\n obs_space_builder.add_dim(Dimension(0, grid_size - 1))\n return obs_space_builder.create_space()\n\n def _alter_transition_func_if_needed(self, slip_prob):\n if slip_prob > 0.0:\n self._alter_transition_func(slip_prob)\n\n def _alter_transition_func(self, slip_prob):\n assert 0.0 < slip_prob <= 1.0\n # slip prob is 2/3 by default - very high!\n P = self._raw_env._wrapped_env.P\n P_mut = copy.deepcopy(P)\n for state in range(self._raw_env._wrapped_env.nS):\n for action in range(self._raw_env._wrapped_env.nA):\n P_cell_raw = P[state][action]\n is_slippery_transition = len(P_cell_raw) == 3\n if is_slippery_transition:\n # middle tuple is the desired location, first\n # and last are non-desired locations\n (_, ns_1, r_1, done_1) = P_cell_raw[0]\n (_, ns_2, r_2, done_2) = 
P_cell_raw[1]\n (_, ns_3, r_3, done_3) = P_cell_raw[2]\n prob_non_desired = slip_prob / 2\n prob_desired = (1 - slip_prob)\n P_cell_mut = []\n if prob_non_desired != 0.0:\n P_cell_mut.append(\n (prob_non_desired, ns_1, r_1, done_1))\n P_cell_mut.append((prob_desired, ns_2, r_2, done_2))\n P_cell_mut.append(\n (prob_non_desired, ns_3, r_3, done_3))\n else:\n P_cell_mut.append((prob_desired, ns_2, r_2, done_2))\n else:\n P_cell_mut = P_cell_raw\n P_mut[state][action] = P_cell_mut\n self._raw_env._wrapped_env.unwrapped.P = P_mut\n\n @property\n def obs_space(self):\n return self._x_y_coordinates_obs_space\n\n @property\n def action_set(self):\n return self._raw_env.action_set\n\n @property\n def step_type(self):\n return self._raw_env.step_type\n\n @property\n def P(self):\n return self._raw_env._wrapped_env.P\n\n @property\n def grid_size(self):\n return self._grid_size\n\n @property\n def slip_prob(self):\n return self._slip_prob\n\n @property\n def terminal_states(self):\n desc = self._raw_env._wrapped_env.desc.flatten()\n terminal_states = [self._convert_raw_obs_to_x_y_coordinates([idx]) for\n (idx, letter) in enumerate(desc) if letter == b'H' or letter ==\n b'G']\n return terminal_states\n\n def reset(self):\n raw_obs = self._raw_env.reset()\n return self._convert_raw_obs_to_x_y_coordinates(raw_obs)\n\n def _convert_raw_obs_to_x_y_coordinates(self, raw_obs):\n # raw obs is number indicating idx into flattened grid, where 0 is top\n # left, and flattening is done left to right, top to bottom.\n # x is the column coordinate, y is the row coordinate, both starting\n # from 0.\n assert len(raw_obs) == 1\n obs_val = raw_obs[0]\n x = obs_val % self._grid_size\n y = math.floor(obs_val / self._grid_size)\n assert (y * self._grid_size + x) == obs_val\n return np.asarray([x, y])\n\n def step(self, action):\n raw_response = self._raw_env.step(action)\n return EnvironmentResponse(\n obs=self._convert_raw_obs_to_x_y_coordinates(raw_response.obs),\n reward=raw_response.reward,\n was_correct_action=raw_response.was_correct_action,\n is_terminal=raw_response.is_terminal)\n\n def is_terminal(self):\n return self._raw_env.is_terminal()\n","repo_name":"jtbish/piecewise","sub_path":"piecewise/environment/reinforcement/frozen_lake_environment.py","file_name":"frozen_lake_environment.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"22419636265","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nfrom collections import defaultdict\nclass Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n def build(start1,end1,start2,end2):\n if start1 > end1:\n return\n val = preorder[start2]\n idx1 = inorderDict[val]\n root = TreeNode(val)\n root.left = build(start1,idx1-1,start2+1,start2+(idx1-start1))\n root.right = build(idx1+1,end1,start2+1+(idx1-start1),end2)\n return root\n inorderDict = defaultdict()\n for idx,entry in enumerate(inorder):\n inorderDict[entry] = idx\n return build(0,len(inorder)-1,0,len(preorder)-1)\n","repo_name":"abhinay-b/Leetcode-Submissions","sub_path":"accepted_codes/Construct_Binary_Tree_from_Preorder_and_Inorder_Traversal/Construct Binary Tree from Preorder and Inorder Traversal_288270822.py","file_name":"Construct Binary Tree from Preorder and Inorder 
Traversal_288270822.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"29219509559","text":"# client.py\nimport Pyro4\nimport uuid\nimport numpy as np\n\nfrom sklearn.cluster import KMeans\n\n\n# Retrieve the server URI from the server machine\n# server_uri = input(\"Enter the server URI: \")\nserver_uri= \"PYRO:obj_f24a220fb31c4c12ac71894327698b45@localhost:53314\"\n\n# Generate a unique ID for this client\nclient_id = str(uuid.uuid4())\n\n# Create a Pyro proxy to the server object\nreceiver = Pyro4.Proxy(server_uri)\n\n# Register the client with the server\nreceiver.register_client(client_id)\nX=np.array(receiver.getData())\nk=receiver.getK()\nlabels=np.ones(X.shape[0])\ncenters=np.ones((k,receiver.getFeaturesSize()))\ninertia=np.ones(1)\ncenters=X[np.random.choice(range(len(X)), size=k, replace=False)]\n\n# run k-means locally and stream the results to the server until it signals convergence\nwhile(not receiver.getConvergence()):\n centers=X[np.random.choice(range(len(X)), size=k, replace=False)]\n kmeans = KMeans(n_clusters=k,init=np.array(centers),max_iter=30)\n kmeans.fit(X)\n labels=kmeans.labels_.tolist()\n centers=kmeans.cluster_centers_.tolist()\n inertia=kmeans.inertia_\n receiver.sendResult(labels,inertia,client_id)","repo_name":"CHENNI-Nidhaleddine/KMEANS_KMEDOIDS_HYPRIDATION_AND_DISTRIBUTION","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"70029487763","text":"from pytube import YouTube\nfrom .ClipboardListener import ClipboardWatcher\nfrom .converter import convert2mp3\nfrom pathlib import Path\nimport time\n\n# where to save\nSAVE_PATH = Path(__file__).parent / \"downloads\"\n\n\ndef is_youtube_url(url:str):\n return any([pat in url for pat in [\"youtube\", \"youtu.be\"]]) and (url.lower().startswith(\"http://\") or\n url.lower().startswith(\"https://\") or\n url.lower().startswith(\"www.\"))\n\n\ndef ytdownloader(url):\n try:\n yt = YouTube(url)\n except Exception as e:\n print(\"Connection Error\") # to handle exception\n print(e)\n return # without a YouTube object there is nothing to download\n\n try:\n # for debugging:\n # for stream in yt.streams.filter(file_extension='mp4').order_by('abr').desc():\n # print(stream)\n\n stream = yt.streams.filter(file_extension='mp4').order_by('abr').desc()[1]\n print(f'Downloading {stream}')\n file_path = stream.download(SAVE_PATH)\n\n print(f'Converting {file_path}')\n convert2mp3(file_path)\n except Exception as e:\n print(e)\n\n\ndef main():\n watcher = ClipboardWatcher(is_youtube_url,\n ytdownloader,\n 1.)\n watcher.start()\n print(\"Waiting for clipboard link...\")\n while True:\n try:\n time.sleep(10)\n except KeyboardInterrupt:\n watcher.stop()\n break\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"lemelech/ytdownloader","sub_path":"src/ytdownloader/ytdownloader.py","file_name":"ytdownloader.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25811683059","text":"age = int(input(\"Please input an age: \")) # input() returns a string, so cast before comparing\n\nif age < 2:\n\tstage = 'baby'\nelif age < 4:\n\tstage = 'toddler'\nelif age < 13:\n\tstage = 'kid'
\nelif age < 20:\n\tstage = 'teenager'\nelif age < 65:\n\tstage = 'adult'\nelse:\n\tstage = 'elder'\n\narticle = 'an' if stage[0] in 'aeiou' else 'a'\nprint(\"This person is \" + article + \" \" + stage + \".\")\n","repo_name":"warriorforGod/python","sub_path":"PCC/ch5/life_stages.py","file_name":"life_stages.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10949228693","text":"from openerp.osv import osv\n\n\nclass StockPicking(osv.osv):\n _inherit = 'stock.picking'\n\n def do_transfer(self, cr, uid, ids, context=None):\n\n do_transfer = super(StockPicking, self).do_transfer(\n cr, uid, ids, context)\n\n if do_transfer:\n stock_picking_id = ids[0]\n stock_picking = self.browse(cr, uid, stock_picking_id, context)\n dispenser_user_id = stock_picking.dispenser_user_id.id\n stock_picking_min_date = stock_picking.min_date\n\n domain_search = [\n ('picking_type_code', '=', 'outgoing'),\n ('dispenser_user_id', '=', False),\n ('min_date', '>=', stock_picking_min_date),\n ('state', 'in', ['assigned', 'partially_available']),\n ]\n\n StockDispenser = self.pool.get('stock.dispenser')\n\n dispensers_activated_and_free = StockDispenser.search(\n cr,\n uid,\n [('active_and_free', '=', True)],\n context=context\n )\n\n next_pickings_ids_without_dispenser = self.search(\n cr,\n uid,\n domain_search,\n context=context,\n order='min_date',\n limit=len(dispensers_activated_and_free)+1)\n\n if next_pickings_ids_without_dispenser:\n\n if dispensers_activated_and_free:\n\n # assign at most one picking per free dispenser\n for pos in range(\n 0, min(len(next_pickings_ids_without_dispenser),\n len(dispensers_activated_and_free))):\n\n next_picking_id = \\\n next_pickings_ids_without_dispenser[pos]\n\n self.write(\n cr,\n uid,\n next_picking_id,\n {\n 'dispenser_user_id':\n dispensers_activated_and_free[pos]\n },\n context)\n\n if len(next_pickings_ids_without_dispenser) == len(\n dispensers_activated_and_free) + 1:\n\n next_picking_id = next_pickings_ids_without_dispenser[-1]\n self.write(\n cr,\n uid,\n next_picking_id,\n {'dispenser_user_id': dispenser_user_id},\n context)\n\n return do_transfer\n","repo_name":"humanytek-team/stock_assign_dispenser_next_picking_out","sub_path":"models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17623280181","text":"import os\nimport sys\nimport numpy as np\nimport math\nimport random\nimport json\nimport os.path as osp\n\nfrom env.my_planar_robot import MyPlanarRobot\n# from config import ROOT_DI\n\nROOT_DIR = osp.join(osp.dirname(osp.abspath(__file__)), \"../\")\nDATASET_DIR = osp.join(ROOT_DIR, \"dataset\")\n\n# -------------- Settings ----------------\nRANDOM = True\nTOTAL_START_GOAL_CNT = 50\nMAZE_SIZE = 5\nOCC_GRID_RESOLUTION = 0.1\n\nclass Maze2D():\n def __init__(self):\n self.obstacles = []\n\n # load robot\n robot = MyPlanarRobot(base_xy_bounds = MAZE_SIZE / 2.0)\n self.robot = robot\n\n # 2d occupancy grid\n self.occ_grid_size = int(MAZE_SIZE / OCC_GRID_RESOLUTION)\n self.occ_grid = np.zeros((self.occ_grid_size, self.occ_grid_size), dtype=np.uint8)\n self.small_occ_grid_size = 10\n\n # add surrounding walls\n half_size = MAZE_SIZE / 2.0\n # add wall\n self.add_box([half_size + 0.1, 0, 1], [0.1, half_size, 1])\n self.add_box([-half_size - 0.1, 0, 1], [0.1, half_size, 1])\n self.add_box([0, half_size + 0.1, 1], [half_size, 0.1, 1])\n self.add_box([0, -half_size - 0.1, 1], [half_size, 0.1, 1])\n\n # internal attributes\n self.inflated_occ_grid = None # computed lazily by get_inflated_occ_grid()\n 
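# Editor's note (illustrative, not part of the original file): given the
# methods defined further down, typical usage of this class would be e.g.:
#
#     maze = Maze2D()
#     maze.random_obstacles(num_of_boxes=8)
#     maze.sample_start_goal()   # populates maze.start and maze.goal
#     grid = maze.get_small_occupancy_grid()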
self.goal_robot_id = None\n self.path = None\n self.approx_path = None\n self.sg_pairs = None\n\n self.obstacle_dict = {}\n\n def clear_obstacles(self):\n self.occ_grid.fill(0)\n self.obstacle_dict = {}\n self.inflated_occ_grid = None\n\n def random_obstacles(self, num_of_boxes = 8):\n # add random obstacles with boxes.\n # box_positions = [(-2.25, 2.25)]\n box_positions = []\n\n for _ in range(num_of_boxes):\n x = random.randint(0, 4)\n y = random.randint(0, 4)\n x = x - 2\n y = y - 2\n box_positions.append((x, y))\n\n # print(box_positions)\n for box_pos in box_positions:\n self.add_box([box_pos[0], box_pos[1], 0.5], [0.5, 0.5, 0.5])\n\n self.obstacle_dict[\"box\"] = box_positions\n\n self.get_inflated_occ_grid()\n\n def add_box(self, box_pos, half_box_size):\n # for occupancy grid, center is at upper left corner, unit is cm\n half_size = MAZE_SIZE / 2.0\n tmp = int(1 / OCC_GRID_RESOLUTION)\n cx = (-box_pos[1] + half_size) * tmp\n cy = (box_pos[0] + half_size) * tmp\n x_size = half_box_size[1] * tmp\n y_size = half_box_size[0] * tmp\n for x in range(max(0, int(cx - x_size)), min(self.occ_grid_size, int(cx + x_size))):\n for y in range(max(0, int(cy - y_size)), min(self.occ_grid_size, int(cy + y_size))):\n self.occ_grid[x, y] = 1\n\n def get_occupancy_grid(self):\n return self.occ_grid\n\n def get_small_occupancy_grid(self):\n occ_grid_small = np.zeros((10, 10), dtype=np.int8)\n for i in range(10):\n for j in range(10):\n occ_grid_small[i, j] = (np.max(self.occ_grid[i*5:(i+1)*5, j*5:(j+1)*5]) == 1)\n\n return occ_grid_small\n\n def get_obstacle_dict(self):\n return self.obstacle_dict.copy()\n\n def load_obstacle_dict(self, obstacle_dict):\n if \"box\" in obstacle_dict:\n for box_pos in obstacle_dict[\"box\"]:\n self.add_box([box_pos[0], box_pos[1], 0.5], [0.5, 0.5, 0.5])\n\n self.obstacle_dict = obstacle_dict\n\n def sample_start_goal(self):\n while True:\n start = [0] * self.robot.num_dim\n goal = [0] * self.robot.num_dim\n low_bounds = self.robot.get_joint_lower_bounds()\n high_bounds = self.robot.get_joint_higher_bounds()\n for i in range(self.robot.num_dim):\n start[i] = random.uniform(low_bounds[i], high_bounds[i])\n goal[i] = random.uniform(low_bounds[i], high_bounds[i])\n\n if self.is_state_valid(start) and self.is_state_valid(goal):\n self.start = start\n self.goal = goal\n break\n\n print(\"Maze2d: start: {}\".format(self.start))\n print(\"Maze2d: goal: {}\".format(self.goal))\n\n def get_inflated_occ_grid(self):\n if self.inflated_occ_grid is None:\n tmp = np.zeros((self.occ_grid_size + 2, self.occ_grid_size + 2), dtype=np.uint8)\n tmp[:self.occ_grid_size, :self.occ_grid_size] += self.occ_grid\n tmp[1:self.occ_grid_size + 1, :self.occ_grid_size] += self.occ_grid\n tmp[2:, :self.occ_grid_size] += self.occ_grid\n tmp[:self.occ_grid_size, 1:self.occ_grid_size+1] += self.occ_grid\n tmp[1:self.occ_grid_size + 1, 1:self.occ_grid_size+1] += self.occ_grid\n tmp[2:, 1:self.occ_grid_size+1] += self.occ_grid\n tmp[:self.occ_grid_size, 2:] += self.occ_grid\n tmp[1:self.occ_grid_size + 1, 2:] += self.occ_grid\n tmp[2:, 2:] += self.occ_grid\n tmp[tmp > 0] = 1\n\n self.inflated_occ_grid = tmp[1:self.occ_grid_size + 1, 1:self.occ_grid_size + 1]\n\n def is_state_valid(self, robot_state):\n # Inflate for collision checking\n self.get_inflated_occ_grid()\n\n y, x = robot_state[0], robot_state[1]\n x = int((MAZE_SIZE / 2.0 - x) / 0.1)\n y = int((y + MAZE_SIZE / 2.0) / 0.1)\n\n res = (self.inflated_occ_grid[x, y] != 1)\n return res\n\nif __name__ == '__main__':\n sys.path.insert(0, 
osp.join(osp.dirname(osp.abspath(__file__)), '../'))\n import utils\n import cv2\n\n maze = Maze2D()\n maze.random_obstacles()\n\n occ_grid = maze.get_occupancy_grid()\n # print(occ_grid)\n tmp = np.copy(occ_grid).reshape(50, 50, 1)\n tmp[tmp == 1] = 255\n cv2.imshow(\"tmp\", tmp)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n occ_grid = maze.inflated_occ_grid\n tmp = np.copy(occ_grid).reshape(50, 50, 1)\n tmp[tmp == 1] = 255\n cv2.imshow(\"tmp\", tmp)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n occ_grid = maze.get_small_occupancy_grid()\n print(occ_grid.shape)\n utils.visualize_nodes(occ_grid, [], None, None)\n\n print(maze.is_state_valid([-1.9, 2.0]))\n print(maze.is_state_valid([-1.9, 1.9]))\n print(maze.is_state_valid([-2.0, 1.9]))\n print(maze.is_state_valid([-2.0, 2.0]))","repo_name":"lyfkyle/CS5242-neural-motion-planning","sub_path":"env/maze_2d.py","file_name":"maze_2d.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38015911857","text":"def normalize_polish(text: str) -> str:\n \"\"\"Convert text to lowercase. Replace polish letters with roman alphabet.\n\n Examples:\n Kość -> kosc\n WąS -> was\n\n Args:\n text (str): text to be normalized\n\n Returns:\n str: normalized text\n \"\"\"\n letters_map = {\n \"ą\": \"a\",\n \"ć\": \"c\",\n \"ę\": \"e\",\n \"ł\": \"l\",\n \"ń\": \"n\",\n \"ś\": \"s\",\n \"ó\": \"o\",\n \"ż\": \"z\",\n \"ź\": \"z\",\n }\n temp = text.lower()\n for letter, replacement in letters_map.items():\n temp = temp.replace(letter, replacement)\n return temp\n","repo_name":"rolzwy7/wykladowcav2","sub_path":"src/core/libs/normalizers/normalize_polish.py","file_name":"normalize_polish.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42516203765","text":"########################################################################################################################\n#\n# Get Products for Europe API call\n#\n# Date created: 24.02.2022\n# Date last modified: 24.02.2022\n#\n# __author__ = Johannes Schmid (schmid@geoville.com)\n# __version__ = 22.02\n#\n########################################################################################################################\n\nfrom check_message.check_message import check_message\nfrom error_classes.http_error_400.http_error_400 import BadRequestError\nfrom error_classes.http_error_404.http_error_404 import NotFoundError\nfrom error_classes.http_error_500.http_error_500 import InternalServerErrorAPI\nfrom error_classes.http_error_503.http_error_503 import ServiceUnavailableError\nfrom flask_restx import Resource\nfrom geoville_ms_database.geoville_ms_database import execute_database\nfrom geoville_ms_logging.geoville_ms_logging import gemslog, LogLevel\nfrom geoville_ms_orderid_generator.generator import generate_orderid\nfrom init.init_env_variables import database_config_file, database_config_section_api\nfrom init.namespace_constructor import service_namespace as api\nfrom lib.auth_header import auth_header_parser\nfrom lib.database_helper import check_user_existence, get_service_id\nfrom models.general_models.general_models import service_success_response_model\nfrom models.models_error.http_error_400 import error_400_model\nfrom models.models_error.http_error_401 import error_401_model\nfrom models.models_error.http_error_403 import error_403_model\nfrom models.models_error.http_error_404 import 
error_404_model\nfrom models.models_error.http_error_408 import error_408_model\nfrom models.models_error.http_error_500 import error_500_model\nfrom models.models_error.http_error_503 import error_503_model\nfrom models.models_products.products_models import european_products_request_model, products_success_response_model\nfrom oauth.oauth2 import require_oauth\nimport json\nimport traceback\n\n\n########################################################################################################################\n# Resource definition for the get-products-europe API call\n########################################################################################################################\n\n@api.expect(european_products_request_model)\n@api.header('Content-Type', 'application/json')\nclass ProductEurope(Resource):\n \"\"\" Class for handling the POST request\n\n This class defines the API call for getting the specified product for entire Europe.\n The class consists of one method which accepts a POST request. For the POST request the user ID is required,\n defined in the corresponding model.\n\n \"\"\"\n\n ####################################################################################################################\n # Method for handling the POST request\n ####################################################################################################################\n\n @require_oauth(['admin', 'user', 'get_product'])\n @api.expect(auth_header_parser)\n @api.response(200, 'Success', products_success_response_model)\n @api.response(400, 'Validation Error', error_400_model)\n @api.response(401, 'Unauthorized', error_401_model)\n @api.response(403, 'Forbidden', error_403_model)\n @api.response(404, 'Not Found', error_404_model)\n @api.response(408, 'Request Timeout', error_408_model)\n @api.response(500, 'Internal Server Error', error_500_model)\n @api.response(503, 'Service Unavailable', error_503_model)\n def post(self):\n \"\"\" POST definition for requesting the specified product for entire Europe\n\n

This method defines the handler of the POST request for getting the\n specified product for entire Europe. It is a synchronous call and thus, it returns the requested data\n immediately. To access the service, it is necessary to generate a valid Bearer\n token with sufficient access rights, otherwise the request will return an HTTP status code 401 or 403. In case of\n those errors, please contact the GeoVille service team for any support.\n\n Description:\n Synchronous request that returns a download link for the Europe-wide product.\n\n Request headers:\n - Authorization: Bearer access token\n\n Request payload:\n - user_id: ID of the requesting user\n\n Result:\n After the request was successful, a download link will be returned which\n provides the ordered file.
\n\n \"\"\"\n\n order_id = None\n\n try:\n req_args = api.payload\n\n payload_check = check_message(req_args)\n\n if not payload_check[0]:\n error = BadRequestError(f'Payload failed the GeoVille standards: {payload_check[1]}', '', '')\n gemslog(LogLevel.WARNING, f\"'message': {error.to_dict()}\", 'API-products-europe', order_id)\n return {'message': error.to_dict()}, 404\n\n if not check_user_existence(req_args['user_id'], database_config_file, database_config_section_api):\n error = NotFoundError('User ID does not exist', '', '')\n gemslog(LogLevel.WARNING, f\"'message': {error.to_dict()}\", 'API-products-europe', order_id)\n return {'message': error.to_dict()}, 404\n\n service_id = get_service_id(\"get_product_europe\", database_config_file, database_config_section_api)\n order_id = generate_orderid(req_args['user_id'], service_id, json.dumps(req_args))\n gemslog(LogLevel.INFO, f'Request payload: {req_args}', 'API-products-europe', order_id)\n\n update_query = \"\"\"UPDATE customer.service_orders\n set status = 'RECEIVED'\n WHERE\n order_id = %s;\n \"\"\"\n execute_database(update_query, (order_id,), database_config_file, database_config_section_api, True)\n\n dll = \"https://s3.waw2-1.cloudferro.com/swift/v1/AUTH_b9657821e4364f88862ca20a180dc485/clcplus-public/\" \\\n \"products/CLMS_CLCplus_RASTER_2018_010m_eu_03035_V1_1.tif\"\n\n db_query = \"\"\"UPDATE \n customer.service_orders \n SET \n status = 'SUCCESS', \n result = %s,\n success = true,\n order_started = NOW(),\n order_received = NOW(), \n order_stopped = NOW() \n WHERE order_id = %s\n \"\"\"\n execute_database(db_query, (dll, order_id), database_config_file, database_config_section_api, True)\n\n except KeyError as err:\n error = BadRequestError(f'Key error resulted in a BadRequest: {err}', api.payload, traceback.format_exc())\n gemslog(LogLevel.WARNING, f\"'message': {error.to_dict()}\", 'API-products-europe', order_id)\n return {'message': error.to_dict()}, 400\n\n except AttributeError:\n error = ServiceUnavailableError('Could not connect to the database server', '', '')\n gemslog(LogLevel.ERROR, f\"'message': {error.to_dict()}\", 'API-products-europe', order_id)\n return {'message': error.to_dict()}, 503\n\n except Exception:\n error = InternalServerErrorAPI('Unexpected error occurred', api.payload, traceback.format_exc())\n gemslog(LogLevel.ERROR, f\"'message': {error.to_dict()}\", 'API-products-europe', order_id)\n return {'message': error.to_dict()}, 500\n\n else:\n gemslog(LogLevel.INFO, f'Request successful', 'API-products-europe', order_id)\n return {\n 'result': dll\n }, 200\n","repo_name":"eea/CLMS_Production_System","sub_path":"03_Service_and_Data_Dissemination_API/services/backend_api/src/resources/resources_products/get_product_europe/get_product_europe.py","file_name":"get_product_europe.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28353856870","text":"from typing import Optional\n\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom IPython.display import display, Image\n\nfrom .utils import generate_images_with_generator, generate_noise\n\n\ndef show_mnist_data(batch_of_images: np.array) -> None:\n im = torchvision.utils.make_grid(batch_of_images)\n plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))\n\n\ndef visualize_generator_outputs(\n generator: nn.Module,\n latent_dim: int,\n epoch: Optional[str] = None,\n output_path: 
Optional[str] = None,\n latent_type: str = \"uniform\",\n img_size: int = 32,\n batch_size: int = 8,\n save: bool = False,\n device: str = \"cpu\",\n):\n output = generate_images_with_generator(\n generator=generator,\n batch_size=batch_size,\n latent_dim=latent_dim,\n latent_type=latent_type,\n img_size=img_size,\n device=device,\n )\n\n fig = plt.figure(figsize=(batch_size, batch_size))\n if epoch is not None:\n fig.suptitle(f\"Generated digits from latent space at epoch {epoch}\")\n else:\n fig.suptitle(\"Generated digits from latent space\")\n\n gridspec = fig.add_gridspec(batch_size, batch_size)\n for idx in range(batch_size**2):\n ax = fig.add_subplot(gridspec[idx])\n ax.imshow(output[idx], cmap=\"gray\")\n ax.set_axis_off()\n\n if save:\n plt.savefig(output_path)\n\n\ndef create_gif(\n batch_size: int,\n generator: nn.Module,\n latent_dim: int,\n latent_type: str,\n gif_path: str,\n inputs: Optional[np.array] = None,\n img_size: int = 32,\n device: str = \"cpu\",\n std: float = 1.0,\n mean: float = 0.0,\n normalization: bool = False,\n) -> None:\n if inputs is None:\n inputs = generate_images_with_generator(\n generator=generator,\n batch_size=batch_size,\n latent_dim=latent_dim,\n latent_type=latent_type,\n img_size=img_size,\n device=device,\n )\n\n # conversion\n if normalization:\n inputs = inputs * std + mean\n inputs = inputs * 256\n inputs = inputs.astype(np.uint8)\n\n if type(inputs) != np.array:\n inputs = np.array(inputs)\n\n # write gif\n imageio.mimwrite(gif_path, inputs, fps=1)\n\n\ndef display_gif(gif_path: str, img_size: int = 200) -> None:\n with open(gif_path, \"rb\") as f:\n img = Image(data=f.read(), format=\"png\", width=img_size, height=img_size)\n display(img)\n\n\ndef get_interpolation_image(\n generator: nn.Module,\n nb_images: int,\n latent_dim: int,\n latent_type: str,\n device: str,\n) -> None:\n start1_noise = generate_noise(\n batch_size=1,\n latent_dim=latent_dim,\n latent_type=latent_type,\n device=device,\n )\n end1_noise = generate_noise(\n batch_size=1,\n latent_dim=latent_dim,\n latent_type=latent_type,\n device=device,\n )\n start2_noise = generate_noise(\n batch_size=1,\n latent_dim=latent_dim,\n latent_type=latent_type,\n device=device,\n )\n end2_noise = generate_noise(\n batch_size=1,\n latent_dim=latent_dim,\n latent_type=latent_type,\n device=device,\n )\n\n vectors = []\n alphas = torch.linspace(0, 1, int(nb_images**0.5))\n\n # linear interpolation\n for alpha1 in alphas:\n for alpha2 in alphas:\n v = (\n start1_noise * (1 - alpha1)\n + end1_noise * alpha1\n + start2_noise * (1 - alpha2)\n + end2_noise * alpha2\n ) / 2\n vectors.append(v)\n\n vectors = torch.vstack(vectors)\n images_generated = generator(vectors).detach().numpy()\n\n size = int(np.sqrt(nb_images))\n\n fig, axes = plt.subplots(size, size, figsize=(size, size))\n fig.suptitle(\"Interpolation between 4 digits\")\n for ax, img in zip(axes.flatten(), images_generated):\n ax.imshow(img[0], cmap=\"gray\", interpolation=\"nearest\")\n ax.axis(\"off\")\n","repo_name":"chloeskt/ot-gan-ensae","sub_path":"source/utils/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11574261041","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef test_interpolate(mesh, plot):\n x, y = (mesh._x, mesh._y)\n\n # Test all the points on the boundary are equal to the points calculated\n # directly using the BoundaryCurves\n xis = 
np.linspace(0, 1, mesh._num_xi)\n xl, yl = mesh.left.curve(xis)\n xr, yr = mesh.right.curve(xis)\n xt, yt = mesh.top.curve(xis)\n xb, yb = mesh.bottom.curve(xis)\n\n assert np.allclose(x[0, :], xl) and np.allclose(y[0, :], yl)\n assert np.allclose(x[-1, :], xr) and np.allclose(y[0, :], yr)\n assert np.allclose(x[:, 0], xb) and np.allclose(y[:, 0], yb)\n assert np.allclose(x[:, -1], xt) and np.allclose(y[:, -1], yt)\n\n plt.figure()\n plt.plot(x, y, \"k.\")\n mesh.left.curve.plot()\n mesh.bottom.curve.plot()\n mesh.right.curve.plot()\n mesh.top.curve.plot()\n plt.axis(\"equal\")\n plt.show(block=False)\n\n\ndef test_ghost_centroids(mesh, tol):\n # Test that the ghosts centroids have distance one from the corresponding\n # boundary cells\n\n for boundary in mesh.boundaries:\n boundary_idx = boundary.cells_idx\n ghost_idx = boundary.ghost_cells_idx\n\n boundary_centroids = mesh.cells._centroids[boundary_idx[0], boundary_idx[1]]\n\n # Compute the ghost cells centroids\n ghost_centroids = mesh.cells._centroids[ghost_idx[0], ghost_idx[1]]\n\n # The distance (lenght of the relative vector between boundary cells\n # and related ghost cell) must be min_length\n distances = ghost_centroids - boundary_centroids\n assert np.mean(np.linalg.norm(distances, axis=-1)) - mesh.cells.min_length < tol\n\n\ndef test_write(tmp_path, mesh):\n mesh.generate()\n file = tmp_path / \"test.xdmf\"\n mesh.write(file.as_posix())\n\n\ndef test_plot(mesh, plot):\n mesh.plot()\n","repo_name":"hpc-maths/josiepy","sub_path":"tests/test_mesh.py","file_name":"test_mesh.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29037065767","text":"import yatest\nimport json\nimport pytest\nimport requests_mock\nimport re\n\nimport ads.watchman.timeline.api.tests.helpers.model_generators as model_generators\nimport ads.watchman.timeline.api.lib.app as app\nfrom ads.watchman.timeline.api.lib.modules.events import db_manager, dao as event_dao, models as event_models, resource_manager\n\nMOCK_RESPONSE_FILE = yatest.common.source_path('ads/watchman/timeline/api/tests/resources/startrek/dictionary_with_mock_responces.json')\nFIELDS_FILE = yatest.common.source_path('ads/watchman/timeline/api/tests/resources/startrek/fields.json')\n\n\ndef mock_responses():\n with open(MOCK_RESPONSE_FILE, 'rb') as f:\n return json.load(f)\n\n\nMOCK_RESPONSES = mock_responses()\nTEST_EVENT = MOCK_RESPONSES['event']\nTEST_ISSUE = MOCK_RESPONSES['issue']\nTEST_STARTREK_NOTIFICATION = MOCK_RESPONSES['notification']\nTEST_TRANSITION = MOCK_RESPONSES['transition']\n\n\n@pytest.fixture\ndef net_mock():\n with requests_mock.mock() as m:\n with open(FIELDS_FILE, 'rb') as f:\n fields_json = json.load(f)\n\n m.get(\"https://st-api.yandex-team.ru/v2/fields/\", json=fields_json)\n m.get(TEST_ISSUE['self'], json=TEST_ISSUE)\n yield m\n\n\ndef set_up_valid_fiasco_event(net_mock):\n net_mock.get(TEST_EVENT['self'], json=TEST_EVENT)\n\n\ndef test_put_fiasco_with_startrek_hook(db_session, net_mock):\n set_up_valid_fiasco_event(net_mock)\n\n matcher = re.compile(TEST_ISSUE[\"self\"] + \"/comments.*\")\n net_mock.post(matcher)\n net_mock.get(TEST_TRANSITION['self'], json=TEST_TRANSITION)\n net_mock.register_uri(requests_mock.ANY, TEST_TRANSITION['self'] + \"/_execute\", json=TEST_TRANSITION)\n\n db_manager.TimelineDBManager(db_session).sync_enums(resource_manager.ResourceManager())\n config = model_generators.TestingConfig()\n config.DAO_INIT = lambda: db_session\n 
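# test wiring (assumption: make_app builds its DAO from DAO_INIT/DAO_CLASS), so the app under\n    # test and the SqlDao assertion below share the fixture's db_session\n    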
config.DAO_CLASS = event_dao.SqlDao\n\n test_app = app.make_app(config).test_client()\n test_app.post('/tickets/fiasco', data=json.dumps(TEST_STARTREK_NOTIFICATION), content_type=u'application/json')\n assert len(event_dao.SqlDao(db_session).get_events(event_models.Filter())) == 1\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"ads/tests/integration_tests/tickets_integration_tests.py","file_name":"tickets_integration_tests.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24836843277","text":"'''\n# This is a class for performing data statistics, supporting to calculate the mean, median, and mode of a given data set.\n\nfrom collections import Counter\n\nclass DataStatistics:\n def mean(self, data):\n \"\"\"\n Calculate the average value of a group of data, accurate to two digits after the Decimal separator\n :param data:list, data list\n :return:float, the mean value\n >>> ds = DataStatistics()\n >>> ds.mean([1, 2, 3, 4, 5])\n 3.00\n \"\"\"\n\n def median(self, data):\n \"\"\"\n Calculate the median of a group of data, accurate to two digits after the Decimal separator\n :param data:list, data list\n :return:float, the median value\n >>> ds = DataStatistics()\n >>> ds.median([2, 5, 1, 3, 4])\n 3.00\n \"\"\"\n\n def mode(self, data):\n \"\"\"\n Calculate the mode of a set of data\n :param data:list, data list\n :return:float, the mode\n >>> ds = DataStatistics()\n >>> ds.mode([2, 2, 3, 3, 4])\n [2, 3]\n \"\"\"\n'''\n\nfrom collections import Counter\n\n\nclass DataStatistics:\n def mean(self, data):\n return round(sum(data) / len(data), 2)\n\n def median(self, data):\n sorted_data = sorted(data)\n n = len(sorted_data)\n if n % 2 == 0:\n middle = n // 2\n return round((sorted_data[middle - 1] + sorted_data[middle]) / 2, 2)\n else:\n middle = n // 2\n return sorted_data[middle]\n\n def mode(self, data):\n counter = Counter(data)\n mode_count = max(counter.values())\n mode = [x for x, count in counter.items() if count == mode_count]\n return mode\n","repo_name":"FudanSELab/ClassEval","sub_path":"data/benchmark_solution_code/DataStatistics.py","file_name":"DataStatistics.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"3"} +{"seq_id":"22893490360","text":"def seg(hora, minuto, segundo):\r\n minuto_acumulado = hora * 60\r\n segundo_acumulado = (minuto_acumulado + minuto) * 60\r\n segundo_final = segundo_acumulado + segundo\r\n\r\n return segundo_final\r\n\r\nhora = int(input(\"Digite hora: \"))\r\nminutos = int(input(\"Digite minutos: \"))\r\nsegundos = int(input(\"Digite segundos: \"))\r\n\r\nprint(seg(hora,minutos,segundos))","repo_name":"mateusascacibas/Python-Functions","sub_path":"Exe06.py","file_name":"Exe06.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13070374396","text":"'''\n- Extend the node class to allow a value to have extra data item (e.g. use it for taks IDs)\n- The node (priority) will have a list of tasks\n- For efficiency, we add/remove from the back only to be O(1)\n- Change insert function for this logic\n- These are nice generic feature\n\n- Build PriorityQueue based on that\n- We use a single node per priority, and add its tasks to the list\n- Deque just get a task from the list. 
If no more items, we just remove the node\n\n- Design cons: the design assumes the prioirty class knows how STL is implemented.\n- This is not good. But let's keep things simple here\n'''\n\n\nclass Node:\n def __init__(self, val=None, attached_data = None, left=None, right=None):\n self.val = val # used to compare nodes\n if attached_data is None:\n self.data_list = []\n else:\n self.data_list = [attached_data]\n self.left = left\n self.right = right\n self.height = 0 # 0 for leaf\n\n def ch_height(self, node): # child height\n if not node:\n return -1 # -1 for null\n return node.height # 0 for leaf\n\n def update_height(self): # call in end of insert function\n self.height = 1 + max(self.ch_height(self.left), self.ch_height(self.right))\n\n def balance_factor(self):\n return self.ch_height(self.left) - self.ch_height(self.right)\n\n def is_leaf(self):\n return not self.left and not self.right\n\n\nclass AVLTree:\n def __init__(self, value, attached_data = None):\n self.root = Node(value, attached_data)\n\n def _right_rotation(self, Q):\n print(\"right_rotation\", Q.val)\n P = Q.left\n Q.left = P.right\n P.right = Q\n Q.update_height()\n P.update_height()\n return P\n\n def _left_rotation(self, P):\n print(\"left_rotation\", P.val)\n Q = P.right\n P.right = Q.left\n Q.left = P\n P.update_height()\n Q.update_height()\n return Q\n\n def balance(self, node):\n if node.balance_factor() == 2: # Left\n if node.left.balance_factor() == -1: # Left Right\n node.left = self._left_rotation(node.left) # To Left Left\n\n node = self._right_rotation(node) # Balance Left Left\n elif node.balance_factor == -2:\n if node.right.balance_factor() == 1:\n node.right = self._right_rotation(node.right)\n\n node = self._left_rotation(node)\n\n return node\n\n def insert(self, val, attached_data = None):\n def process(current, val, attached_data):\n if val < current.val:\n if not current.left:\n current.left = Node(val, attached_data)\n else:\n # change left. update left as it might be balanced\n current.left = process(current.left, val, attached_data)\n elif val > current.val:\n if not current.right:\n current.right = Node(val, attached_data)\n else:\n current.right = process(current.right, val, attached_data)\n else: # found: add an extra element\n current.data_list.append(attached_data)\n\n current.update_height()\n return self.balance(current)\n\n self.root = process(self.root, val, attached_data)\n\n def inorder(self, current):\n def process(current):\n if not current:\n return\n process(current.left)\n lst.append(current.val)\n process(current.right)\n\n lst = []\n process(current)\n return lst\n\n # the inorder traversal must be: 1) sorted 2) unique\n def isValidBST(self, current):\n lst = self.inorder(current.root)\n # We can do the comparison without loop\n for idx in range(1, len(lst)):\n if lst[idx - 1] >= lst[idx]:\n return False\n return True\n\n\n ##################################\n\n def min_node(self, cur):\n while cur and cur.left:\n cur = cur.left\n return cur\n\n def delete(self, val):\n def process(current, val):\n if not current:\n return\n\n if val < current.val: # Value on the left side\n # the left subtree will be changed. 
This can be left itself\n current.left = process(current.left, val) # must link\n return current\n\n if val > current.val: # Value on the right side\n current.right = process(current.right, val)\n return current\n\n # we found the node: we have 3 cases\n if current.is_leaf(): # case 1: leaf\n return None # Just remove\n\n if not current.right: # case 2: has left only\n current = current.left\n elif not current.left: # case 2: has right only\n current = current.right\n else:\n # 2 children: Use successor\n mn = self.min_node(current.right)\n current.val = mn.val # copy data\n current.right = process(current.right, mn.val)\n\n current.update_height()\n return self.balance(current)\n\n self.root = process(self.root, val)\n\n def max_node(self):\n cur = self.root\n while cur and cur.right:\n cur = cur.right\n return cur\n\n\nclass PriorityQueue:\n def __init__(self):\n self.avl = AVLTree(-1)\n self.items_cnt = 0\n\n def enqueue(self, task_id, priority):\n self.items_cnt += 1\n self.avl.insert(priority, attached_data=task_id)\n\n def dequeue(self):\n assert not self.empty()\n self.items_cnt -= 1\n\n bst_node = self.avl.max_node()\n # To keep dequeue O(logn) although node have several tasls\n # We will always push and pop from back in O(1)\n item = bst_node.data_list.pop()\n\n if not bst_node.data_list: # no more elements\n self.avl.delete(bst_node.val)\n\n return item\n\n\n def empty(self):\n return self.items_cnt == 0\n\n\nif __name__ == '__main__':\n\n tasks = PriorityQueue()\n\n tasks.enqueue(1131, 1)\n tasks.enqueue(3111, 3)\n tasks.enqueue(2211, 2)\n tasks.enqueue(3161, 3)\n\n print(tasks.dequeue()) # 3161\n print(tasks.dequeue()) # 3111\n\n tasks.enqueue(1535, 1)\n tasks.enqueue(2815, 2)\n tasks.enqueue(3845, 3)\n tasks.enqueue(3145, 3)\n\n while not tasks.empty():\n print(tasks.dequeue(), end=' ')\n # 3145 3845 2815 2211 1535 1131\n\n","repo_name":"vZyx/Python-notes","sub_path":"-DSA/11 AVL Tree/06 Homework 1 - 6 Medium to Hard Challenges/04_priority_queue_using_avl.py","file_name":"04_priority_queue_using_avl.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33743408987","text":"\"\"\"An implementation of quick sort, based on the pseudocode provided in the \"Implementing Quick Sort\" lesson\"\"\"\n\ndef partition(array: list, start: int, end: int):\n pivot_pointer = start\n pivot = array[start]\n\n left_pointer = start + 1\n right_pointer = end\n\n done = False\n while not done:\n while left_pointer <= right_pointer and array[left_pointer] <= pivot:\n # Slide to the right\n left_pointer += 1\n \n while array[right_pointer] >= pivot and right_pointer >= left_pointer:\n # Slide to the left\n right_pointer -= 1\n\n if right_pointer < left_pointer:\n break\n\n # Swap the list items\n array[left_pointer], array[right_pointer] = array[right_pointer], array[left_pointer]\n \n # Swap the pivot with array[right_pointer]\n array[pivot_pointer], array[right_pointer] = array[right_pointer], array[pivot_pointer]\n\n # The item at right_pointer is now in the correct position (relative to the other items)\n return right_pointer\n\n\ndef quick_sort(array: list, start: int, end: int):\n \"\"\"Uses the quick sort algorithm to sort the provided list in-place\n \n - Only sorts a section of the list, as specified by the `start=` and `end=` parameters.\n - Returns a reference to the sorted list\n\n Parameters:\n - `array` - The list to sort. 
Items must be compatible with the `<`and `>`operators\n - `start` - The index of the first item to be sorted\n - `end` - The index of the last item to be sorted\n \"\"\"\n if start < end:\n # Partition the list\n split_point = partition(array, start, end)\n\n # Sort each half\n quick_sort(array, start, split_point - 1)\n quick_sort(array, split_point + 1, end)\n \n return array\n\n\nif __name__ == \"__main__\":\n demo_list = [9, 5, 4, 15, 3, 8, 11]\n print(demo_list)\n quick_sort(demo_list, 0, len(demo_list) - 1)\n print(demo_list)\n","repo_name":"RandomSearch18/sorting-algorithms","sub_path":"quick-sort.py","file_name":"quick-sort.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31145557796","text":"from tkinter import *\nfrom tkinter import filedialog\n\nroot = Tk()\nroot.title('Create video subtitles')\nfile_paths = {}\n\n\ndef get_video_path():\n root.video_path = filedialog.askopenfilename(initialdir=\"C:/\", title=\"Select video\",\n filetypes=((\".mp4 files\", \"*.mp4\"), (\"all files\", \"*.*\")))\n video_path = root.video_path\n file_paths['video_path'] = video_path\n print(file_paths)\n print(len(list(file_paths.keys())))\n if len(list(file_paths.keys())) == 2:\n start_button['state'] = NORMAL\n\n\ndef get_text_path():\n root.text_path = filedialog.askopenfilename(initialdir=\"C:/\", title=\"Select video\",\n filetypes=((\".txt files\", \"*.txt\"), (\"all files\", \"*.*\")))\n text_path = root.text_path\n file_paths['text_path'] = text_path\n print(file_paths)\n print(len(list(file_paths.keys())))\n if len(list(file_paths.keys())) == 2:\n start_button['state'] = NORMAL\n\n\nvideo_path_button = Button(root, text=\"Select video\", command=get_video_path)\ntext_path_button = Button(root, text=\"Select text\", command=get_text_path)\nstart_button = Button(root, text=\"Start\", state=DISABLED)\n\nvideo_path_button.pack()\ntext_path_button.pack()\nstart_button.pack()\n\n\nroot.mainloop()\n\n\n","repo_name":"MarcelRossol/OCR-subtitle-sychronisation","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29147070621","text":"from nltk import ne_chunk, pos_tag, word_tokenize\nfrom nltk.tree import Tree\nfrom nltk import word_tokenize\nfrom nltk import pos_tag\n\ndef get_ne_chunks(tagged_text):\n chunked = ne_chunk(tagged_text)\n continuous_chunk = []\n current_chunk = []\n for i in chunked:\n if type(i) == Tree:\n current_chunk.append(\" \".join([token for token, pos in i.leaves()]))\n if current_chunk:\n named_entity = \" \".join(current_chunk)\n if named_entity not in continuous_chunk:\n continuous_chunk.append(named_entity)\n current_chunk = []\n else:\n continue\n return continuous_chunk","repo_name":"AdeptLearner123/code-names-bot-processing-legacy","sub_path":"utils/nlp_utils.py","file_name":"nlp_utils.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31812177456","text":"from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\nimport torch\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nclass Point(object):\n def __init__(self, x, y):\n \"\"\"\n :param x: 直角坐标系x,坐标\n :param y: 直角坐标系y,坐标\n \"\"\"\n self.x = x\n self.y = y\n\n\ndef polar_to_rectangular(r, theta): # 极坐标转化为直角坐标\n x = r * 
math.cos(theta)\n y = r * math.sin(theta)\n return x, y\n\n\ndef rectangular_to_polar(x, y): # 直角坐标转化为极坐标\n r = math.sqrt(x ** 2 + y ** 2)\n theta = math.atan2(y, x) + math.pi # 返回 (0,2*pi] 的弧度\n return r, theta\n\n\ndef angle_to_radian(angle):\n \"\"\"\n 角度转弧度\n :return:\n \"\"\"\n return angle / 180 * math.pi\n\n\nclass Circle(object):\n # 一个圆\n def __init__(self, radius, center: Point):\n \"\"\"\n :param radius: 半径\n :param center: 圆心(直角坐标系)\n \"\"\"\n self.radius = radius\n self.center = center\n\n\ndef generate_circles(nums=4000, r_min=10, r_max=1000, x_min=-100, x_max=100, y_min=-100, y_max=100):\n \"\"\"\n 生成 指定个数,随机半径的同心圆。\n :param nums: 生成的圆的个数\n :param r_min: 最小半径\n :param r_max: 最大半径\n :param x_min: 圆心坐标范围\n :param x_max: 圆心坐标范围\n :param y_min: 圆心坐标范围\n :param y_max: 圆心坐标范围\n :return: list, 圆的半径列表\n \"\"\"\n circles = []\n for i in range(nums):\n radius = np.random.random() * (r_max - r_min) + r_min\n x = np.random.random() * (x_max - x_min) + x_min\n y = np.random.random() * (y_max - y_min) + y_min\n center = Point(x, y)\n circles.append(Circle(radius, center))\n return circles\n\n\ndef generate_data_pairs(circles, l_min=5, l_max=20, nums=1000, angle_interval=1):\n \"\"\"\n 在指定的 circles 里,随机抽数据集。(用来训练或者测试)\n :param circles: list, 圆的半径列表\n :param l_min: 序列的最小长度\n :param l_max: 序列的最大长度\n :param nums: 生成多少个数据对\n :param angle_interval: 序列角度每次增加多少度,默认 1 度。\n :return:\n random_circles: 生成数据使用的圆\n all_inputs: 一个列表,包含nums个输入数据,每个输入数据的形式为:[[x1,y1],[x2,y2] ........]\n all_targets: 一个列表,包含nums个标签数据,每个标签数据的形式为:[[x1,y1],[x2,y2] ........]\n \"\"\"\n result = []\n random_starts = np.random.randint(0, 360, size=nums) # 随机起点\n random_circles = np.random.choice(circles, size=nums, replace=True) # 随机圆\n random_lens = np.random.randint(l_min, l_max + 1, size=nums) # 随机输入序列长度, 范围 [l_min, l_max]\n all_inputs = [] # 多个输入序列的列表\n all_targets = [] # 多个标签的列表\n for i in range(nums):\n one_input = [] # 第i组数据的 输入序列\n one_target = [] # 第i组数据的 标签\n for j in range(random_lens[i]):\n x, y = polar_to_rectangular(r=random_circles[i].radius,\n theta=angle_to_radian(random_starts[i] + j * angle_interval))\n x += random_circles[i].center.x\n y += random_circles[i].center.y\n one_input.append([x, y])\n # 后面一个点的坐标当作当前输出\n x, y = polar_to_rectangular(r=random_circles[i].radius,\n theta=angle_to_radian(random_starts[i] + (j + 1) * angle_interval))\n x += random_circles[i].center.x\n y += random_circles[i].center.y\n one_target.append([x, y])\n all_inputs.append(one_input)\n all_targets.append(one_target)\n return random_circles, all_inputs, all_targets\n\n\ndef plot_data_pairs(all_inputs, all_targets):\n \"\"\"\n 把 generate_data_pairs 生成的数据绘制出来 (调试用)\n \"\"\"\n plt.figure()\n for i in range(len(all_inputs)):\n one_input = all_inputs[i]\n one_target = all_targets[i]\n plt.plot([point[0] for point in one_input], [point[1] for point in one_input], label='input_%d' % i)\n # plt.plot([point[0] for point in one_target], [point[1] for point in one_target], label='target_%d' % i)\n # 绘制最后一个 target\n last_target = all_targets[i][-1]\n plt.plot(last_target[0], last_target[1], 'o', label='output_%d' % i)\n plt.legend()\n plt.show()\n\n\ndef test_plot_data_pairs():\n \"\"\"\n 生成一些数据对 并绘制出来\n :return:\n \"\"\"\n circles = generate_circles(nums=100, r_min=10, r_max=100)\n _, all_inputs, all_targets = generate_data_pairs(circles=circles, l_min=150, l_max=300, nums=5, angle_interval=1)\n plot_data_pairs(all_inputs, all_targets)\n\n\ndef get_rise(sequences):\n \"\"\"\n 把 generate_data_pairs 生成的 all_inputs 或者 all_targets 转化为波动率\n 
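(each output item is (x[i+1] - x[i]) / (x[i+1] + eps), i.e. the change relative to the next\n    value, with eps guarding against division by zero)\n    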
:return:\n first_items : list, sequences 里 每个 sequence 的第一个数据 的列表(用于数据还原)\n new_sequences : list, 转化为波动率之后的 sequences\n \"\"\"\n eps = 1e-11\n first_items = []\n new_sequences = []\n for sequence in sequences:\n new_sequence = []\n first_items.append(sequence[0])\n for i in range(len(sequence) - 1):\n cur_item = sequence[i]\n next_item = sequence[i + 1]\n new_item = [0, 0]\n new_item[0] = (next_item[0] - cur_item[0]) / (next_item[0] + eps)\n new_item[1] = (next_item[1] - cur_item[1]) / (next_item[1] + eps)\n new_sequence.append(new_item)\n new_sequences.append(new_sequence)\n return first_items, new_sequences\n\n\ndef get_diff(sequences):\n \"\"\"\n 把 generate_data_pairs 生成的 all_inputs 或者 all_targets 转化为 差值\n :return:\n first_items : list, sequences 里 每个 sequence 的第一个数据 的列表(用于数据还原)\n new_sequences : list, 转化为 差值 之后的 sequences\n \"\"\"\n first_items = []\n new_sequences = []\n for sequence in sequences:\n new_sequence = []\n first_items.append(sequence[0])\n for i in range(len(sequence) - 1):\n cur_item = sequence[i]\n next_item = sequence[i + 1]\n new_item = [0, 0]\n new_item[0] = next_item[0] - cur_item[0]\n new_item[1] = next_item[1] - cur_item[1]\n new_sequence.append(new_item)\n new_sequences.append(new_sequence)\n return first_items, new_sequences\n\n\ndef unget_rise(first_items, sequences):\n \"\"\"\n 与 get_rise 做相反的操作\n :param first_items: list, sequences 里 每个 sequence 的第一个数据 的列表\n :param sequences: list, 转化为波动率之后的 sequences\n :return:\n \"\"\"\n eps = 1e-11\n new_sequences = []\n for i, sequence in enumerate(sequences):\n new_sequence = []\n new_sequence.append(first_items[i])\n last_item = first_items[i]\n for j in range(len(sequence)):\n cur_rise = sequence[j]\n cur_item = [0, 0]\n cur_item[0] = (eps * cur_rise[0] + last_item[0]) / (1 - cur_rise[0])\n cur_item[1] = (eps * cur_rise[1] + last_item[1]) / (1 - cur_rise[1])\n new_sequence.append(cur_item)\n last_item = cur_item\n new_sequences.append(new_sequence)\n return new_sequences\n\n\ndef unget_diff(first_items, sequences):\n \"\"\"\n 与 get_rise 做相反的操作\n :param first_items: list, sequences 里 每个 sequence 的第一个数据 的列表\n :param sequences: list, 转化为波动率之后的 sequences\n :return:\n \"\"\"\n new_sequences = []\n for i, sequence in enumerate(sequences):\n new_sequence = []\n new_sequence.append(first_items[i])\n last_item = first_items[i]\n for j in range(len(sequence)):\n cur_rise = sequence[j]\n cur_item = [0, 0]\n cur_item[0] = cur_rise[0] + last_item[0]\n cur_item[1] = cur_rise[1] + last_item[1]\n new_sequence.append(cur_item)\n last_item = cur_item\n new_sequences.append(new_sequence)\n return new_sequences\n\n\ndef test_get_rise():\n sequences = [\n [[0, 0], [1, 1], [2, 2]],\n [[1, 2], [4, 5], [6, 7]],\n [[3, 4], [7, 8]]\n ]\n first_items, new_sequences = get_rise(sequences)\n print(sequences)\n print(first_items)\n print(new_sequences)\n print(\"---\")\n print(unget_rise(first_items, new_sequences))\n\n\ndef test_get_diff():\n sequences = [\n [[0, 0], [1, 1], [2, 2]],\n [[1, 2], [4, 5], [6, 7]],\n [[3, 4], [7, 8]]\n ]\n first_items, new_sequences = get_diff(sequences)\n print(sequences)\n print(first_items)\n print(new_sequences)\n print(\"---\")\n print(unget_diff(first_items, new_sequences))\n\n\ndef padding_variable_length_sequence(all_sequences):\n \"\"\"\n generate_data_pairs 函数 生成的 all_inputs 和 all_target 是变长的序列\n 使用这个方法,可以把他们打包为一个 用0填充的 Tensor ( 方便神经网络并行计算 )\n example:\n 打包前:\n # all_inputs\n [[0, 1, 2, 3, 4, 5, 6],\n [7, 7],\n [6, 8]]\n # all_target\n [[1, 2, 3, 3, 3, 1, 4],\n [5, 5],\n [4, 5]]\n 打包后:\n # all_inputs\n 
array([[ 1., 2., 3., 4., 5., 6., 7.],\n [ 8., 8., 0., 0., 0., 0., 0.],\n [ 7., 9., 0., 0., 0., 0., 0.]])\n # all_target\n array([[ 1., 2., 3., 3., 3., 1., 4.],\n [ 5., 5., 0., 0., 0., 0., 0.],\n [ 4., 5., 0., 0., 0., 0., 0.]])\n :param all_sequences: 可以是 all_inputs 或者 all_target\n :return:\n array : torch.Tensor, 打包后的数组\n lengths : torch.Tensor, 每个序列的长度\n \"\"\"\n # 计算每个序列的长度\n lengths = [len(sequence) for sequence in all_sequences]\n lengths = torch.Tensor(lengths).long()\n sequences = [torch.Tensor(sequence) for sequence in all_sequences]\n array = pad_sequence(sequences, batch_first=True, padding_value=-100)\n return array, lengths\n\n\ndef unpadding_variable_length_sequence(array: torch.Tensor, lengths):\n \"\"\"\n 与 padding_variable_length_sequence 做相反的过程\n :param array: torch.Tensor, 打包后的数组\n :param lengths: torch.Tensor, 每个序列的长度\n :return: list, 还原后的序列\n \"\"\"\n lengths = lengths.detach().cpu().numpy().tolist()\n array = array.detach().cpu().numpy()\n all_sequences = []\n batch_size, max_len, input_size = array.shape\n for i in range(batch_size):\n all_sequences.append(array[i, 0:int(lengths[i])].tolist())\n return all_sequences\n\n\ndef get_acc(random_circles, predict_targets, targets):\n \"\"\"\n 计算准确度\n 准确度判断方法:\n 以目标点为圆心,间隔1度的点的直线为直径的圆内\n :param random_circles: list,\n :param predict_targets: list, 每一项是一个预测出的点\n :param targets: list, 每一项是标签点\n :return:\n \"\"\"\n nb_correct = 0\n for i in range(len(random_circles)):\n # 计算准确度允许的误差距离 (圆上 转过1度 的直线距离 的 二分之一)\n target_distance = random_circles[i].radius * math.sin(angle_to_radian(1 / 2))\n distance = (predict_targets[i][0] - targets[i][0]) ** 2 # (x1-x2)^2\n distance += (predict_targets[i][1] - targets[i][1]) ** 2 # (y1-y2)^2\n # 预测出的点 与目标点的距离\n distance = math.sqrt(distance)\n if distance <= target_distance:\n nb_correct += 1\n return nb_correct / len(random_circles)\n\n\nif __name__ == \"__main__\":\n test_plot_data_pairs()\n","repo_name":"GameHoo/CirclesExperiment","sub_path":"data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":11991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17294454583","text":"# https://leetcode.com/problems/symmetric-tree/\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def isSymmetric(self, root):\n def SymmetricArray(arr):\n i = 0\n ans = True\n while i != len(arr) // 2:\n if arr[i] != arr[-i-1]:\n ans = False\n break\n i += 1\n return ans\n def bfs(nodes):\n new_nodes = []\n values = []\n for node in nodes:\n if node != None:\n new_nodes.append(node.left)\n new_nodes.append(node.right)\n values.append(node.val)\n else:\n values.append(None) \n if not SymmetricArray(values):\n return False\n else:\n if len(new_nodes) > 0:\n return bfs(new_nodes)\n return True\n return bfs([root])","repo_name":"eldor-galiev/LeetcodeTasks","sub_path":"Breadth-First Search/101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30838238653","text":"import csv\nimport MySQLdb\nimport os\n\nfrom django.core.management import BaseCommand\n\nfrom extlinks.links.helpers import split_url_for_query\nfrom extlinks.links.models import LinkSearchTotal, URLPattern\nfrom extlinks.settings.base import BASE_DIR\n\n\nclass Command(BaseCommand):\n help = \"Updates 
link totals from externallinks table\"\n\n def handle(self, *args, **options):\n protocols = [\"http\", \"https\"]\n\n with open(os.path.join(BASE_DIR, \"wiki-list.csv\"), \"r\") as wiki_list:\n csv_reader = csv.reader(wiki_list)\n wiki_list_data = []\n for row in csv_reader:\n wiki_list_data.append(row[0])\n\n all_urlpatterns = URLPattern.objects.all()\n\n total_links_dictionary = {}\n for i, language in enumerate(wiki_list_data):\n db = MySQLdb.connect(\n host=\"{lang}wiki.analytics.db.svc.wikimedia.cloud\".format(lang=language),\n user=os.environ[\"REPLICA_DB_USER\"],\n passwd=os.environ[\"REPLICA_DB_PASSWORD\"],\n db=\"{lang}wiki_p\".format(lang=language),\n )\n\n cur = db.cursor()\n\n for urlpattern in all_urlpatterns:\n # For the first language, initialise tracking\n if i == 0:\n total_links_dictionary[urlpattern.pk] = 0\n\n url = urlpattern.url\n optimised_url, url_pattern_end = split_url_for_query(url)\n\n for protocol in protocols:\n url_pattern_start = protocol + \"://\" + optimised_url\n\n cur.execute(\n \"\"\"SELECT COUNT(*) FROM externallinks\n WHERE el_index LIKE '{url_start}'\n AND el_index LIKE '{url_end}'\n \"\"\".format(\n url_start=url_pattern_start, url_end=url_pattern_end\n )\n )\n\n this_num_urls = cur.fetchone()[0]\n\n total_links_dictionary[urlpattern.pk] += this_num_urls\n\n for urlpattern_pk, total_count in total_links_dictionary.items():\n linksearch_object = LinkSearchTotal(\n url=URLPattern.objects.get(pk=urlpattern_pk), total=total_count\n )\n linksearch_object.save()\n","repo_name":"WikipediaLibrary/externallinks","sub_path":"extlinks/links/management/commands/linksearchtotal_collect.py","file_name":"linksearchtotal_collect.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"5744947491","text":"#User function Template for python3\n\n\nclass Solution:\n\n #Function to find two repeated elements.\n def twoRepeated(self, arr, N):\n i = 0\n while i < len(arr):\n if arr[i] <= 0:\n i += 1\n else:\n target_index = arr[i] - 1\n if target_index == i:\n arr[i] = -1\n else:\n to_be_swapped_value = arr[target_index]\n if to_be_swapped_value <= 0:\n arr[i] = 0\n arr[target_index] -= 1\n else:\n arr[i] = to_be_swapped_value\n arr[target_index] = -1\n\n print(arr)\n return [1, 2]\n\n\n#{\n# Driver Code Starts\n#Initial Template for Python 3\n\nimport math\n\n\ndef main():\n T = int(input())\n while (T > 0):\n\n N = int(input())\n\n A = [int(x) for x in input().strip().split()]\n\n obj = Solution()\n ans = obj.twoRepeated(A, N)\n print(ans[0], ans[1])\n\n T -= 1\n\n\nif __name__ == \"__main__\":\n main()\n# } Driver Code Ends","repo_name":"Aryanamish/daily_practice","sub_path":"geeks/searching/two_repeated_element.py","file_name":"two_repeated_element.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29027438257","text":"# coding: utf-8\n\n\n\nimport mock\nimport pytest\n\nfrom idm.sync.staff.users import import_tvm_apps, import_tvm_responsibles\nfrom idm.users.constants.group import GROUP_TYPES\nfrom idm.users.constants.user import USER_TYPES\nfrom idm.users.models import Group, GroupMembership, GroupResponsibility, User\n\npytestmark = [pytest.mark.django_db]\n\n\nclass MockedResponse(object):\n def __init__(self, json_data):\n self.json_data = json_data\n\n def json(self):\n return self.json_data\n\n\ndef create_tvm_json(external_id, name, service_id, service_slug, 
service_name, service_name_en):\n return {\n 'resource': {\n 'external_id': external_id,\n 'name': name,\n },\n 'service': {\n 'id': service_id,\n 'slug': service_slug,\n 'name': {\n 'ru': service_name,\n 'en': service_name_en,\n }\n }\n }\n\n\ndef test_import_tvm_apps():\n abc_api_responses = MockedResponse({\n 'next': None,\n 'results': [\n create_tvm_json(1, 'tvm_name_1', 10, 'a', 'A', 'A'),\n create_tvm_json(2, 'tvm_name_2', 20, 'b', 'B', 'B'),\n create_tvm_json(3, 'tvm_name_3', 10, 'a', 'A', 'A'),\n create_tvm_json(5, 'tvm_name_1', 20, 'b', 'B', 'B')\n ]\n })\n\n app_1 = User.objects.create(type=USER_TYPES.TVM_APP, username=1) # должно появиться имя\n app_4 = User.objects.create(type=USER_TYPES.TVM_APP, username=4) # должно стать неактивным\n app_5 = User.objects.create(type=USER_TYPES.TVM_APP, username=5)\n service_2 = Group.objects.create(type=GROUP_TYPES.TVM_SERVICE, external_id=20) # должно появиться имя\n service_4 = Group.objects.create(type=GROUP_TYPES.TVM_SERVICE, slug='d', name='D', name_en='D', external_id=40)\n membership_4 = GroupMembership.objects.create(user=app_4, group=service_4, state='active', is_direct=True)\n membership_5 = GroupMembership.objects.create(user=app_5, group=service_2, state='inactive', is_direct=True)\n\n with mock.patch('idm.sync.staff.users.http.get', return_value=abc_api_responses):\n import_tvm_apps()\n\n app_1.refresh_from_db()\n assert app_1.first_name == 'tvm_name_1'\n membership = GroupMembership.objects.select_related('group').get(user=app_1)\n assert membership.state == 'active'\n service_1 = membership.group\n assert service_1.type == GROUP_TYPES.TVM_SERVICE\n assert service_1.external_id == 10\n assert service_1.slug == 'a'\n assert service_1.name == 'A'\n assert service_1.name_en == 'A'\n\n app_2 = User.objects.get(type=USER_TYPES.TVM_APP, username=2)\n assert app_2.first_name == 'tvm_name_2'\n prev_service_2_id = service_2.id\n service_2 = GroupMembership.objects.select_related('group').get(user=app_2).group\n assert service_2.id == prev_service_2_id\n assert service_2.slug == 'b'\n assert service_2.name == 'B'\n assert service_2.name_en == 'B'\n\n app_3 = User.objects.get(type=USER_TYPES.TVM_APP, username=3)\n assert app_3.first_name == 'tvm_name_3'\n service_3 = GroupMembership.objects.select_related('group').get(user=app_3).group\n assert service_3.slug == 'a'\n assert service_3.name == 'A'\n assert service_3.name_en == 'A'\n\n app_4.refresh_from_db()\n assert app_4.is_active is False\n membership_4.refresh_from_db()\n assert membership_4.state == 'inactive'\n\n membership_5.refresh_from_db()\n assert membership_5.state == 'active'\n\n assert Group.objects.tvm_groups().count() == 3\n\n\ndef test_import_tvm_responsibles():\n tvm_service = Group.objects.create(type=GROUP_TYPES.TVM_SERVICE, external_id=100)\n odd_group = Group.objects.create(type=GROUP_TYPES.DEPARTMENT, external_id=666)\n user_a = User.objects.create(username='a')\n user_b = User.objects.create(username='b')\n user_c = User.objects.create(username='c')\n resp_a = GroupResponsibility.objects.create(group=tvm_service, user=user_a, is_active=True)\n resp_c = GroupResponsibility.objects.create(group=tvm_service, user=user_c, is_active=False)\n resp_d = GroupResponsibility.objects.create(group=odd_group, user=user_c, is_active=False)\n\n abc_api_responses = MockedResponse({\n 'next': None,\n 'results': [\n {\n 'person': {\n 'login': 'b'\n },\n },\n {\n 'person': {\n 'login': 'c'\n },\n },\n ]\n })\n with mock.patch('idm.sync.staff.users.http.get', 
return_value=abc_api_responses):\n import_tvm_responsibles()\n\n resp_a.refresh_from_db()\n assert not resp_a.is_active\n\n resp_b = GroupResponsibility.objects.get(group=tvm_service, user=user_b)\n assert resp_b.is_active\n\n resp_c.refresh_from_db()\n assert resp_c.is_active\n\n resp_d.refresh_from_db()\n assert not resp_d.is_active\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/sync/test_tvm.py","file_name":"test_tvm.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23927736345","text":"# Action은 왼쪽으로 이동 (a=0), 오른쪽으로 이동(a=1)\n# Action 6번하면 Episode 종료\n# Action을 (0, 1, 0, 1, 0, 0) 순서로 취했을 때 R = +1000\n# Reward는 오른쪽은 +1, 왼쪽은 -1\n# (1, 1, 1, 1, 1, 1)은 local optimal : 나오면 안되는 경우\n\n####################################\n# # # # # #\n# # # 시작 # # #\n# # # # # #\n####################################\n\nimport numpy as np\nimport random\nimport copy\n\nclass LR_world():\n def __init__(self):\n self.x = []\n\n def step(self, a):\n if a == 0:\n if self.x == [0, 1, 0, 1, 0]:\n reward = +1000\n else:\n reward = -1\n self.move_left()\n else:\n reward = +1\n self.move_right()\n\n done = self.is_done()\n return self.x, reward, done\n\n def move_left(self):\n self.x.append(0)\n\n def move_right(self):\n self.x.append(1)\n\n def is_done(self):\n if len(self.x) == 6:\n return True\n else: \n return False\n \n def reset(self):\n self.x = []\n return self.x\n\nclass QAgent():\n def __init__(self):\n self.q_table = np.zeros((127,2)) \n self.eps = 0.9\n self.alpha = 0.01\n\n # def state(self, s):\n # state = 0\n # if len(s) == 0:\n # state = 1\n # else:\n # state += int(\"\".join([str(bit) for bit in s]), 2)\n # return state\n \n def state(self, s):\n state = 0\n for bit in s:\n state = state * 2 + bit\n if len(s) > 0:\n state += 2 ** (len(s) + 1)\n return state\n\n def select_action(self, s):\n k = self.state(s)\n coin = random.random()\n if coin < self.eps:\n action = random.randint(0,1)\n else:\n action_val = self.q_table[k,:]\n action = np.argmax(action_val)\n return action\n\n def select_bestaction(self, s):\n x = self.state(s)\n action_val = self.q_table[x,:]\n action = np.argmax(action_val)\n return action\n\n def update_table(self, history):\n cum_reward = 0\n for transition in history[::-1]:\n s, a, r, s_prime = transition\n x = self.state(s)\n cum_reward = cum_reward + r\n self.q_table[x, a] = self.q_table[x, a] + self.alpha * (cum_reward - self.q_table[x, a])\n return cum_reward\n\n def anneal_eps(self):\n self.eps -= 0.001\n self.eps = max(self.eps, 0.2)\n\n def show_table(self):\n q_lst = self.q_table.tolist()\n #print(q_lst)\n \ndef main():\n env = LR_world()\n agent = QAgent()\n best_score = -float('inf')\n best_epi = []\n\n for n_epi in range(15000):\n done = False\n history = []\n score = 0.0\n\n s = env.reset()\n while not done:\n s = s[:]\n a = agent.select_action(s)\n s_prime, r, done = env.step(a)\n history.append((copy.deepcopy(s), a, r, copy.deepcopy(s_prime)))\n s = s_prime\n score += r\n\n agent.update_table(history) \n agent.anneal_eps()\n\n # if score == 999.0:\n # best_epi.append(n_epi)\n\n # if n_epi%9==0:\n # print(\"n_episode : {}, score : {:.1f}\".format(n_epi, score))\n # agent.show_table()\n\n # if score >= best_score:\n # best_table = []\n # best_score = score\n # best_table = agent.q_table.tolist()\n\n # print(\"\\nBest table score : {:.1f}, best_episode 갯수: {}\".format(best_score, len(best_epi)))\n # print('Best table :', best_table)\n\n 
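# evaluation: after training, run one greedy rollout using select_bestaction\n    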
done=False\n s=env.reset()\n total_reward = 0\n while not done:\n s = s[:]\n a = agent.select_bestaction(s)\n s_prime, r, done = env.step(a)\n s_prime = s_prime[:]\n total_reward = total_reward + r\n s = s_prime\n\n #print(s,total_reward)\n \n return total_reward\n\naverage = 0\nfor i in range(100):\n total_reward = main()\n print(i+1 , \"회 최적정책 리워드는 \", total_reward)\n average = total_reward + average\n\nprint(average/100, \"은 평균\")","repo_name":"pjh09050/Reinforcement-Learning","sub_path":"LR_world_MC.py","file_name":"LR_world_MC.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8074667721","text":"import time\nimport os\nimport copy\nimport argparse\nimport pdb\nimport collections\nimport sys\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\nimport torchvision\n#import tensorflow as tf\nimport model\nfrom anchors import Anchors\nimport losses\nfrom dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\nfrom torch.utils.data import Dataset, DataLoader\n\nimport coco_eval\nimport csv_eval\n\n#assert torch.__version__.split('.')[1] == '4'\n\nprint('CUDA available: {}'.format(torch.cuda.is_available()))\n\n\ndef main(args=None):\n\n\tparser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n\n\tparser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='coco')\n\tparser.add_argument('--coco_path', help='Path to COCO directory', default='/data/deeplearning/dataset/coco2017')\n\tparser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')\n\tparser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')\n\tparser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')\n\n\tparser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)\n\tparser.add_argument('--epochs', help='Number of epochs', type=int, default=100)\n\tparser.add_argument('--lr', help='learning rate', type=float, default=1e-2)\n\tparser.add_argument('--decay', type=int, default=0)\n\tparser.add_argument('--s_norm', help='normalize regression outputs', type=float, default=4.0)\n\tparser.add_argument('--t_val', help='sensitivity of per pyramid loss', type=float, default=1.7)\n\tparser.add_argument('--IOU', help='IoU loss or regular regression loss', type=int, default=1)\n\tparser.add_argument('--rest_norm', help='weight for rest region, i.e. 
not effective region', type=float, default=1.0)\n\tparser.add_argument('--center', help='center the per pyramid value', type=int, default=0)\n\tparser.add_argument('--adam', help='adam opt', type=int, default=0)\n\tparser.add_argument('--perc', help='adam opt', type=int, default=1)\n\tparser.add_argument('--batch_size', help='adam opt', type=int, default=2)\n\tparser.add_argument('--momentum', help='sgd momentum', type=float, default=0.9)\n\tparser.add_argument('--resume', help='path to model', type=str, default=None)\n\tparser.add_argument('--save_model_dir', default='/data/deeplearning/dataset/training/data/newLossRes')\n\tparser.add_argument('--log_dir', default='/data/deeplearning/dataset/training/data/log_dir')\n\tparser = parser.parse_args(args)\n\t#tf.summary.FileWriter(parser.log_dir)\n\t# Create the data loaders\n\tif parser.dataset == 'coco':\n\t\tif parser.coco_path is None:\n\t\t\traise ValueError('Must provide --coco_path when training on COCO,')\n\n\t\tdataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n\t\tdataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()]))\n\n\telif parser.dataset == 'csv':\n\t\tif parser.csv_train is None:\n\t\t\traise ValueError('Must provide --csv_train when training on COCO,')\n\t\tif parser.csv_classes is None:\n\t\t\traise ValueError('Must provide --csv_classes when training on COCO,')\n\t\tdataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n\t\tif parser.csv_val is None:\n\t\t\tdataset_val = None\n\t\t\tprint('No validation annotations provided.')\n\t\telse:\n\t\t\tdataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]))\n\n\telse:\n\t\traise ValueError('Dataset type not understood (must be csv or coco), exiting.')\n\n\tsampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False)\n\tdataloader_train = DataLoader(dataset_train, num_workers=0, collate_fn=collater, batch_sampler=sampler)\n\n\tif dataset_val is not None:\n\t\tsampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)\n\t\tdataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)\n\n\tif parser.resume is not None:\n\t\tretinanet = torch.load(parser.resume)\n\t\tstart_epoch = int(parser.resume.split('coco_retinanet_')[1].split('_')[0]) + 1\n\telse:\n\t\tstart_epoch = 0\n\t\t# Create the model\n\t\tif parser.depth == 18:\n\t\t\tretinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)\n\t\telif parser.depth == 34:\n\t\t\tretinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)\n\t\telif parser.depth == 50:\n\t\t\tretinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)\n\t\telif parser.depth == 101:\n\t\t\tretinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)\n\t\telif parser.depth == 152:\n\t\t\tretinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)\n\t\telse:\n\t\t\traise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')\n\n\tuse_gpu = True\n\n\tif use_gpu:\n\t\tretinanet = retinanet.cuda()\n\t\n\tretinanet = torch.nn.DataParallel(retinanet).cuda()\n\n\tretinanet.training = 
True\n\tif parser.adam:\n\t\toptimizer = optim.Adam(retinanet.parameters(), lr=parser.lr)\n\t\tscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)\n\telse:\n\t\toptimizer = optim.SGD(retinanet.parameters(), lr=parser.lr, momentum=0.9, weight_decay=0.0001)\n\t\t#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)\n\t\tscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\n\n\tloss_hist = collections.deque(maxlen=500)\n\n\tretinanet.train()\n\tretinanet.module.freeze_bn()\n\n\tprint('Num training images: {}'.format(len(dataset_train)))\n\n\n\tfor epoch_num in range(start_epoch, parser.epochs):\n\n\t\tretinanet.train()\n\t\tretinanet.module.freeze_bn()\n\t\t\n\t\tepoch_loss = []\n\n\t\tfor iter_num, data in enumerate(dataloader_train):\n\t\t\ttry:\n\t\t\t\titer_loss = []\n\t\t\t\toptimizer.zero_grad()\n\n\t\t\t\t#per_picture_loss, follow_ = retinanet([data['img'].cuda().float(), data['annot']], parser)\n\t\t\t\tper_picture_loss= retinanet([data['img'].cuda().float(), data['annot']], parser)\n\n\t\t\t\tbatch_loss = per_picture_loss.mean()\n\t\t\t\t\n\t\t\t\tif bool(batch_loss == 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tbatch_loss.backward()\n\n\t\t\t\ttorch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)\n\n\t\t\t\toptimizer.step()\n\t\t\t\tif iter_num > 100:\n\t\t\t\t\tloss_hist.append(float(batch_loss))\n\n\t\t\t\tepoch_loss.append(float(batch_loss))\n\t\t\t\titer_loss.append(float(batch_loss))\n\t\t\t\t#example of pyramid losses\n\t\t\t\t#instance_loss = np.prod(follow_[0])\n\t\t\t\t#tf.summary.scalar('mean_iter_loss', np.mean(iter_loss))\n\t\t\t\tif iter_num % 10 == 0:\n\t\t\t\t\tprint('Epoch: {} | Iteration: {} | Loss: {:1.5f} | Running loss: {:1.5f}'\n\t\t\t\t\t\t .format(epoch_num, iter_num, np.mean(iter_loss), np.mean(loss_hist)))\n\t\t\t\tdel batch_loss\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\tcontinue\n\n#\t\tif parser.dataset == 'coco':\n#\n#\t\t\tprint('Evaluating dataset')\n#\t\t\tcoco_eval.evaluate_coco(dataset_val, retinanet, parser)\n#\n#\t\telif parser.dataset == 'csv' and parser.csv_val is not None:\n#\t\t\tprint('Evaluating dataset')\n#\t\t\tmAP = csv_eval.evaluate(dataset_val, retinanet)\n\n\t\tif not parser.adam:\n\t\t\tif parser.decay:\n\t\t\t\tscheduler.step()\n\t\telse:\n\t\t\tscheduler.step(np.mean(epoch_loss))\n\n\t\tprint('saving checkpoint')\n\t\ttorch.save(retinanet.module, os.path.join(parser.save_model_dir,'{}_retinanet_{}_perc_{}_tval_{}_bs_{}_lr_{}_ada_{}_mom_{}_decay_{}.pt'.format(parser.dataset, epoch_num, parser.perc, parser.t_val, parser.batch_size, parser.lr, parser.adam, parser.momentum, parser.decay)))\n\n\tretinanet.eval()\n\tprint('saving model')\n\ttorch.save(retinanet, os.path.join(parser.save_model_dir,'model_final_{}_perc_{}_tval_{}_bs_{}_lr_{}_ada_{}_mom_{}_decay_{}.pt'.format(epoch_num, parser.perc, parser.t_val, parser.batch_size, parser.lr, parser.adam, parser.momentum, parser.decay)))\n\nif __name__ == '__main__':\n main()\n","repo_name":"NoamRosenberg/anchor-free-retinanet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"40770802726","text":"\"\"\"empty message\n\nRevision ID: 0481e2e8abbc\nRevises: e6999daf4674\nCreate Date: 2021-11-21 17:26:30.784595\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '0481e2e8abbc'\ndown_revision 
= 'e6999daf4674'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('message_boards', sa.Column('title', sa.String(length=50), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('message_boards', 'title')\n # ### end Alembic commands ###\n","repo_name":"benthere914/Acquire-Market-Place","sub_path":"migrations/versions/20211121_172630_.py","file_name":"20211121_172630_.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29020250577","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom jinja2 import Environment, PackageLoader\n\nenv = Environment(loader=PackageLoader(package_name='static_api'))\nenv.globals['settings'] = settings\n\n\ndef test_docs_index():\n template = env.get_template('static_api/docs/index.html')\n\n rendered = template.render({\n 'static': lambda *a, **kw: 'static'\n })\n\n assert rendered\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/static_api_tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72269613840","text":"import sys\nfrom collections import deque\n\nn, l = map(int, sys.stdin.readline().split())\narr = [*map(int, sys.stdin.readline().split())]\nm = deque()\nfor i in range(n):\n tmp = arr[i]\n\n while m and m[-1] > tmp: m.pop()\n m.append(tmp)\n\n if i >= l and m[0] == arr[i-l]: m.popleft()\n print(m[0], end=' ')","repo_name":"haecheol-shin/algorithm_exercise","sub_path":"BOJ_DataStructure/BOJ_11003.py","file_name":"BOJ_11003.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31485312777","text":"from marshmallow import fields\nfrom neomodel import RelationshipTo\nfrom src.GCF.decorators.OntologyClass import decorator_schema\nfrom src.Models.CRM.v5_0_2.NodeEntities.E18_Physical_Thing import (\n E18_Physical_Thing,\n E18_Physical_ThingSchema,\n)\nfrom src.Models.CRM.v5_0_2.NodeEntities.E26_Physical_Feature import (\n E26_Physical_FeatureSchema,\n)\nfrom src.Models.CRM.v5_0_2.NodeProperties.P54_has_current_permanent_location import (\n P54_has_current_permanent_location,\n)\nfrom src.Models.CRM.v5_0_2.NodeProperties.P55_has_current_location import (\n P55_has_current_location,\n)\nfrom src.Models.CRM.v5_0_2.NodeProperties.P56_bears_feature import P56_bears_feature\n\n\n@decorator_schema\nclass E19_Physical_ObjectSchema(E18_Physical_ThingSchema):\n has_current_permanent_location = fields.List(\n fields.Nested(\"src.Models.CRM.v5_0_2.NodeEntities.E53_Place.E53_PlaceSchema\")\n )\n has_current_location = fields.List(\n fields.Nested(\"src.Models.CRM.v5_0_2.NodeEntities.E53_Place.E53_PlaceSchema\")\n )\n bears_feature = fields.List(fields.Nested(E26_Physical_FeatureSchema))\n\n\nclass E19_Physical_Object(E18_Physical_Thing):\n has_current_permanent_location = RelationshipTo(\n \".E53_Place.E53_Place\",\n \"P54_has_current_permanent_location\",\n model=P54_has_current_permanent_location,\n )\n has_current_location = RelationshipTo(\n \".E53_Place.E53_Place\",\n \"P55_has_current_location\",\n model=P55_has_current_location,\n )\n bears_feature = 
RelationshipTo(\n \".E26_Physical_Feature.E26_Physical_Feature\",\n \"P56_bears_feature\",\n model=P56_bears_feature,\n )\n\n def __init__(self, schema=None, *args, **kwargs):\n if schema is None:\n schema = E19_Physical_ObjectSchema()\n\n super().__init__(schema, *args, **kwargs)\n","repo_name":"feup-infolab/archgraph","sub_path":"old_project/src/Models/CRM/v5_0_2/NodeEntities/E19_Physical_Object.py","file_name":"E19_Physical_Object.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74115816722","text":"# 11.1. Interacting with HTTP Services As a Client\n\n\n# 知识点:\n# requests 模块的文档(http://docs.python-requests.org)质量很高\n# 在要同一个真正的站点进行交互前,先在 httpbin.org 这样的网站上做实验常常是可取的办法\n#json 格式是单引号,不可以写成双引号\n\nimport requests\nr = requests.get('http://httpbin.org/get?name=Dave&n=37',headers = { 'User-agent': 'goaway/1.0'})\n\n##原版错误之处,json()括号\nresp = r.json()\nprint(resp)\n\n\nprint(resp['headers'])\nprint(resp['args'])\n\n# url = 'http://httpbin.org/post'\n# files = { 'file': ('data.csv', open('data.csv', 'rb')) }\n# r = requests.post(url, files=files)\n\n\nresp = requests.head('http://www.python.org/index.html')\n\nstatus = resp.status_code\n# last_modified = resp.headers['last-modified']\n# content_type = resp.headers['content-type']\n# content_length = resp.headers['content-length']\n\n# 301 redirect: 301 代表永久性转移(Permanently Moved),\n# 302 redirect: 302 代表暂时性转移(Temporarily Moved ),\nprint(status) #301\n\n# print(last_modified)\n# print(content_length)\n# print(content_type)\n\n\n\n\n####### post ------如果有更复杂的请访问(https://pypi.python.org/pypi/requests)\nimport requests\n# Base URL being accessed\nurl = 'http://httpbin.org/post'\n# Dictionary of query parameters (if any)\nparms = {\n 'name1' : 'value1',\n 'name2' : 'value2'\n}\n# Extra headers\nheaders = {\n 'User-agent' : 'none/ofyourbusiness',\n 'Spam' : 'Eggs'\n}\nresp = requests.post(url, data=parms, headers=headers)\n# Decoded text returned by the request\ntext = resp.text\nprint(text)\n# {\n# \"args\": {},\n# \"data\": \"\",\n# \"files\": {},\n# \"form\": {\n# \"name1\": \"value1\",\n# \"name2\": \"value2\"\n# },\n# \"headers\": {\n# \"Accept\": \"*/*\",\n# \"Accept-Encoding\": \"gzip, deflate\",\n# \"Connection\": \"close\",\n# \"Content-Length\": \"25\",\n# \"Content-Type\": \"application/x-www-form-urlencoded\",\n# \"Host\": \"httpbin.org\",\n# \"Spam\": \"Eggs\",\n# \"User-Agent\": \"none/ofyourbusiness\"\n# },\n# \"json\": null,\n# \"origin\": \"47.88.226.161\",\n# \"url\": \"http://httpbin.org/post\"\n# }\n","repo_name":"CuteSmartTiger/mastering_python","sub_path":"NetworkAndWebProgramming/request_get_post.py","file_name":"request_get_post.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31725067020","text":"# lexer.py\n# Author: Thomas MINIER - MIT License 2019\nimport re\nfrom pyparsing import CaselessKeyword, Keyword, LineEnd, Literal, MatchFirst, OneOrMore, Optional, Group, Regex, ZeroOrMore\n\n\ndef ListOf(content: Group, start_char: str = \"(\", end_char: str = \")\", separator: str = \",\") -> Group:\n \"\"\"Build a group that matches a list of the same tokens.\n\n Args:\n * content: A Group of tokens.\n * start_char: Character at the start of the the list.\n * end_char: Character at the end of the the list.\n * separator: Character used to sperate elements in the list\n \"\"\"\n # list_content = MatchFirst([\n # content,\n # 
content + Optional(Literal(separator)).suppress()\n # ])\n return Group(Literal(start_char).suppress() + OneOrMore(content + Optional(Literal(separator)).suppress()) + Literal(end_char).suppress())\n\n# ----- General terms ------\n\n\nuriref = r'(<([^:]+:[^\\s\"<>]+)>|(([A-Za-z0-9]|-)+):([A-Za-z0-9]+))'\nliteral = r'\"([^\"\\\\]*(?:\\\\.[^\"\\\\]*)*)\"'\nlitinfo = r'(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\\^\\^' + uriref + r')?'\n\nr_line = re.compile(r'([^\\r\\n]*)(?:\\r\\n|\\r|\\n)')\nr_wspace = re.compile(r'[ \\t]*')\nr_wspaces = re.compile(r'[ \\t]+')\nr_tail = re.compile(r'[ \\t]*\\.[ \\t]*(#.*)?')\nr_uriref = re.compile(uriref)\nr_nodeid = re.compile(r'_:([A-Za-z0-9]*)')\nr_literal = re.compile(literal + litinfo)\nr_variable = re.compile(r'\\?([A-Za-z0-9]+)')\nr_prefix = re.compile(r'([A-Za-z0-9]|-)+')\n\n# a suppressed comma (',')\ncomma = Literal(',').suppress()\n\n# A Turtle Prefix\nprefixName = Regex(r_prefix)\n\n# The special 'none' keyword (a shorthand notation for ottr:None)\nottrNone = Keyword('none')\n\n# a RDF IRI\niri = Regex(r_uriref)\n\n# a RDF Blank Node\nbnode = Regex(r_nodeid)\n\n# a SPARQL variable\nvariable = Regex(r_variable)\n\n# a RDF Literal\nliteral = Regex(r_literal)\n\n# An IRI or a Variable\niriOrVariable = MatchFirst([iri, variable])\n\n# Any valid RDF terms\nanyTerm = MatchFirst([ottrNone, iri, literal, bnode, variable])\n\n# Any valid concrete RDF terms, i.e., excluding SPARQL variables\nconcreteTerm = MatchFirst([ottrNone, iri, literal, bnode])\n\n# ----- stOTTR language rules ------\n\n# The List type, where T is a type IRI\nlistType = Literal(\"List<\").suppress() + iri + Literal(\">\").suppress()\n\n# The type of a parameter\nparamType = MatchFirst([\n listType.setResultsName('listType'),\n iri.setResultsName('type')\n])\n\n# The value of an argument\nargumentValue = MatchFirst([\n anyTerm,\n ListOf(anyTerm),\n Group(Literal(\"++\").suppress() + anyTerm)\n])\n\n# The value of a concrete argument, i.e., without any variables\nconcreteArgument = MatchFirst([\n concreteTerm,\n ListOf(concreteTerm)\n])\n\n# A template parameter definition, with optional type and nonblank\n# Examples: \"?iri\", \"xsd:string ?literal\", \"! 
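# [Editor's example] Hypothetical strings (not from the source) exercising the
# RDF term regexes defined above: r_literal captures the lexical form in group
# 1 and an optional language tag from litinfo in group 2; r_uriref captures
# the IRI between angle brackets in group 2.
m = r_literal.match('"hello"@en')
print(m.group(1), m.group(2))                 # hello en
m = r_uriref.match('<http://example.org/x>')
print(m.group(2))                             # http://example.org/x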
otrr:IRI ?iri\" or \"?iri = ex:Ann\"\nparam = Group(\n Optional(Keyword('!')).setResultsName('nonblank') +\n Optional(Keyword('?')).setResultsName('optional') +\n Optional(paramType) +\n variable.setResultsName('value') +\n Optional(Keyword('=') + concreteTerm.setResultsName('default'))\n).setResultsName('parameter') + Optional(',').suppress()\n\n# A list of template parameters\nparamList = Group(\n Literal('[').suppress() +\n ZeroOrMore(param) +\n Literal(']').suppress()\n)\n\n# An instance of a template which may contains variables\n# like ottr:Triple (_:person, rdf:type, ?person)\ninstanceWithVars = Group(\n iri.setResultsName('name') +\n Literal('(').suppress() +\n OneOrMore(argumentValue + Optional(comma).suppress()).setResultsName('arguments') +\n Literal(')').suppress()\n)\n\n# An expansion of an instance\n# example : cross | ottr:Triple(?s, ?p, ++?o)\nexpansionMode = Group(\n Keyword(\"cross\").setResultsName('type') +\n Keyword(\"|\").suppress() +\n instanceWithVars.setResultsName('content')\n)\n\n# A concrete instance of a template (which cannot contains variables)\n# like ex:MyTemplate (ex:Ann, foaf:Person, \"Ann Strong\")\nconcreteInstance = Group(\n iri.setResultsName('name') +\n Literal('(').suppress() +\n OneOrMore(concreteArgument + Optional(comma).suppress()).setResultsName('arguments') +\n Literal(')').suppress()\n)\n\n# A stOTTR prefix declaration\nprefixDeclaration = Group(\n CaselessKeyword(\"@prefix\").suppress() +\n prefixName.setResultsName('name') +\n Literal(':').suppress() +\n iri.setResultsName('value') +\n Literal('.').suppress()\n)\n\n# A stOTTR template\nottrTemplate = Group(\n iri.setResultsName('name') +\n paramList.setResultsName('parameters') +\n Literal('::').suppress() +\n Literal('{').suppress() +\n ZeroOrMore(\n MatchFirst([instanceWithVars, expansionMode]) +\n Optional(',').suppress()\n ).setResultsName('instances') +\n Literal('}').suppress() + Literal('.').suppress()\n)\n\n# Several stOTTR templates\nottrRoot = ZeroOrMore(prefixDeclaration + LineEnd().suppress()).setResultsName('prefixes') + OneOrMore(ottrTemplate + LineEnd().suppress()).setResultsName('templates')\n\n# Several concrete stOTTR instances (with no variables allowed)\nottrRootInstances = ZeroOrMore(prefixDeclaration + LineEnd().suppress()).setResultsName('prefixes') + OneOrMore(concreteInstance + Keyword('.').suppress() + Optional(LineEnd()).suppress()).setResultsName('instances')\n\n\ndef lex_templates_stottr(text: str) -> Group:\n \"\"\"Run the lexer on a set of stOTTR template defintions.\n\n Argument: A set of stOTTR template defintions as text.\n\n Returns: The lexed stOTTR template defintions.\n \"\"\"\n return ottrRoot.parseString(text)\n\n\ndef lex_instances_stottr(text: str) -> Group:\n \"\"\"Run the lexer on a set of stOTTR instances.\n\n Argument: A set of stOTTR instances as text.\n\n Returns: The lexed stOTTR instances.\n \"\"\"\n return ottrRootInstances.parseString(text)\n","repo_name":"Callidon/pyOTTR","sub_path":"ottr/parsers/stottr/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"21680130892","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nfrom Hw3.Ex2.Utils import *\n\n\nclass GatedResidualBlock(nn.Module):\n \"\"\"\n Gated residual block\n \"\"\"\n\n def __init__(self, channels, kernel_size=3):\n super(GatedResidualBlock, self).__init__()\n self.channels = channels\n self.kernel_size = kernel_size\n\n self.conv = 
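# [Editor's sketch] One way the lexer above might be driven; the input below
# is illustrative rather than a tested fixture, so whether it parses exactly
# as written depends on grammar details such as Keyword('.') and line-end
# handling.
src = '@prefix ex: <http://example.org#> .\nex:Person(ex:Ann, "Ann") .\n'
parsed = lex_instances_stottr(src)
print(parsed.prefixes)
print(parsed.instances)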
nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(channels, 2 * channels, kernel_size=kernel_size, padding=1)\n )\n\n def forward(self, x):\n x = self.conv(x)\n x, gate = torch.chunk(x, 2, dim=1)\n gate = torch.sigmoid(gate)\n return x * gate\n\n\nclass ResidualStack(nn.Module):\n def __init__(self):\n super(ResidualStack, self).__init__()\n\n layers = []\n\n for _ in range(5):\n layers.append(nn.ReLU())\n layers.append(nn.Conv2d(128, 64, 3, stride=1, padding=1))\n layers.append(nn.ReLU())\n layers.append(nn.Conv2d(64, 128, 3, stride=1, padding=1))\n layers.append(GatedResidualBlock(channels=128))\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.layers(x)\n return out + x\n\n\nclass ConvEncoder(nn.Module):\n def __init__(self, latent_dim=64):\n super(ConvEncoder, self).__init__()\n\n self.latent_dim = latent_dim\n self.layers = [nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1)] + \\\n [nn.ReLU()] + \\\n [nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)] + \\\n [nn.ReLU()] + \\\n [nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)]\n self.layers.append(ResidualStack())\n self.layers = nn.Sequential(*self.layers)\n conv_out_dim = (32 // 8) ** 2 * 128\n self.fc = nn.Linear(conv_out_dim, 2 * latent_dim)\n\n def forward(self, x):\n out = self.layers(x)\n out = out.view(out.shape[0], -1)\n mu, log_var = self.fc(out).chunk(2, dim=1)\n return mu, log_var\n\n\nclass ConvDecoder(nn.Module):\n def __init__(self, latent_dim=16):\n super(ConvDecoder, self).__init__()\n\n self.latent_dim = latent_dim\n self.conv_in_size = (64, 32 // 8, 32 // 8)\n self.fc = nn.Linear(latent_dim, np.prod(self.conv_in_size))\n self.layers = [nn.ReLU()] + [nn.ConvTranspose2d(64, 128, kernel_size=3, stride=2)] + \\\n [ResidualStack()] + [nn.ReLU()] + \\\n [nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1)] + [nn.ReLU()] + \\\n [nn.ConvTranspose2d(64, 6, kernel_size=4, stride=2, padding=2)]\n self.layers = nn.Sequential(*self.layers)\n\n def forward(self, z):\n out = self.fc(z)\n out = out.view(out.shape[0], *self.conv_in_size)\n mu, log_var = self.layers(out).chunk(2, dim=1)\n return mu, log_var\n\n\nclass ConvVAE(nn.Module):\n \"\"\" Was lazy with this model so hardcoded everything \"\"\"\n\n def __init__(self, latent_dim=16):\n super(ConvVAE, self).__init__()\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.latent_dim = latent_dim\n\n self.encoder = ConvEncoder(latent_dim)\n self.decoder = ConvDecoder(latent_dim)\n\n def reparameterize(self, mu, lv):\n std = lv.mul(0.5).exp()\n z = mu + torch.randn_like(mu).mul(std)\n return z\n\n def TopDown(self, z):\n mu_x, lv_x = self.decoder(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n return x_recon\n\n def forward(self, x, noise=True):\n mu_z, lv_z = self.encoder(x)\n z = self.reparameterize(mu_z, lv_z)\n mu_x, lv_x = self.decoder(z)\n if noise:\n x_recon = self.reparameterize(mu_x, lv_x)\n else:\n x_recon = mu_x\n return x_recon\n\n def calc_loss(self, x):\n mu_z, lv_z = self.encoder(x)\n z = self.reparameterize(mu_z, lv_z)\n mu_x, lv_x = self.decoder(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n\n # reconstruction_loss = nn.functional.mse_loss(x, x_recon, reduction='none').view(x.shape[0], -1).sum(1).mean()\n # kl = -(lv_z*0.5) - 0.5 + 0.5 * (torch.exp(lv_z) + mu_z ** 2)\n # kl = kl.sum(1).mean()\n\n reconstruction_loss = -log_normal_pdf(x, mu_x, lv_x).sum(dim=[1,2,3]).mean()\n zeros = torch.zeros_like(z).to(self.device)\n ones = torch.ones_like(z).to(self.device)\n kl = 
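# [Editor's example] The gating in GatedResidualBlock.forward in isolation:
# the convolution doubles the channel count, torch.chunk splits it back into a
# value half and a gate half, and the sigmoid gate modulates the values
# (PixelCNN-style gated activation).
import torch
x = torch.randn(2, 8, 4, 4)                   # toy activations, 2 * channels = 8
value, gate = torch.chunk(x, 2, dim=1)
out = value * torch.sigmoid(gate)
print(out.shape)                              # torch.Size([2, 4, 4, 4])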
(log_normal_pdf(z, mu_z, lv_z) - log_normal_pdf(z, zeros, ones)).sum(dim=1).mean()\n # print(reconstruction_loss, kl)\n\n ELBO = reconstruction_loss + np.max((kl, 1))\n\n return ELBO, kl, reconstruction_loss\n\n def sample(self, num_samples):\n z = torch.randn([num_samples, self.latent_dim]).to(self.device)\n mu_x, lv_x = self.decoder(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n return x_recon\n\n def interpolations(self, x):\n with torch.no_grad():\n mu_z, lv_z = self.encoder(x)\n z = self.reparameterize(mu_z, lv_z)\n z1, z2 = z.chunk(2, dim=0)\n interpolations = [self.TopDown(z1 * (1 - alpha) + z2 * alpha) for alpha in np.linspace(0, 1, 10)]\n interpolations = torch.stack(interpolations, dim=1).view(-1, 3, 32, 32)\n interpolations_mu = [self.decoder(z1 * (1 - alpha) + z2 * alpha)[0] for alpha in np.linspace(0, 1, 10)]\n interpolations_mu = torch.stack(interpolations_mu, dim=1).view(-1, 3, 32, 32)\n\n return interpolations, interpolations_mu\n\n\nclass ConvVAE2(nn.Module):\n \"\"\" Was lazy with this model so hardcoded everything \"\"\"\n\n def __init__(self, latent_dim=16):\n super(ConvVAE2, self).__init__()\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.latent_dim = latent_dim\n\n self.encoder = [nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1)] + [nn.ReLU()] + \\\n [nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)] + [nn.ReLU()] + \\\n [nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)] + [nn.ReLU()] + \\\n [nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)]\n self.encoder = nn.Sequential(*self.encoder)\n conv_out_dim = (32 // 16) ** 2 * 256\n self.fc1 = nn.Linear(conv_out_dim, 2 * latent_dim)\n\n self.conv_in_size = (64, 32 // 16, 32 // 16)\n self.fc2 = nn.Linear(latent_dim, np.prod(self.conv_in_size))\n self.decoder = [nn.ReLU()] + [nn.ConvTranspose2d(64, 128, kernel_size=4, stride=2, padding=1)] + \\\n [nn.ReLU()] + [nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)] + \\\n [nn.ReLU()] + [nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)] + \\\n [nn.ReLU()] + [nn.ConvTranspose2d(32, 6, kernel_size=4, stride=2, padding=1)]\n self.decoder = nn.Sequential(*self.decoder)\n\n def reparameterize(self, mu, lv):\n std = lv.mul(0.5).exp()\n z = mu + torch.randn_like(mu).mul(std)\n return z\n\n def BottomUp(self, x):\n out = self.encoder(x)\n out = out.view(out.shape[0], -1)\n mu_z, lv_z = self.fc1(out).chunk(2, dim=1)\n return mu_z, lv_z\n\n def TopDown(self, z):\n out = self.fc2(z)\n out = out.view(out.shape[0], *self.conv_in_size)\n mu_x, lv_x = self.decoder(out).chunk(2, dim=1)\n return mu_x, lv_x\n\n def forward(self, x):\n mu_z, lv_z = self.BottomUp(x)\n z = self.reparameterize(mu_z, lv_z)\n mu_x, lv_x = self.TopDown(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n return x_recon\n\n def calc_loss(self, x, beta):\n mu_z, lv_z = self.BottomUp(x)\n z = self.reparameterize(mu_z, lv_z)\n mu_x, lv_x = self.TopDown(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n\n reconstruction_loss = nn.functional.mse_loss(x, x_recon, reduction='none').view(x.shape[0], -1).sum(1).mean()\n\n kl = -(lv_z * 0.5) - 0.5 + 0.5 * (torch.exp(lv_z) + mu_z ** 2)\n kl = kl.sum(1).mean()\n\n ELBO = reconstruction_loss + np.max((kl, 1))\n\n return ELBO, kl, reconstruction_loss\n\n def sample(self, num_samples):\n z = torch.randn([num_samples, self.latent_dim]).to(self.device)\n mu_x, lv_x = self.TopDown(z)\n x_recon = self.reparameterize(mu_x, lv_x)\n return 
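# [Editor's sketch] The KL term above is a Monte-Carlo estimate,
# log q(z|x) - log p(z) at a sampled z; for diagonal Gaussians the
# commented-out closed form is equivalent in expectation.  np.max((kl, 1))
# appears to intend a "free bits" floor of 1 nat on a torch scalar;
# torch.clamp states that directly:
import torch
mu, lv = torch.randn(4, 16), torch.randn(4, 16)
kl = (-0.5 * (1 + lv - mu.pow(2) - lv.exp())).sum(dim=1).mean()
print(torch.clamp(kl, min=1.0))               # KL(q || N(0, I)) floored at 1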
x_recon\n","repo_name":"JohanYe/CS294-158","sub_path":"Hw3/Ex2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32472663745","text":"class Solution(object):\n def containsDuplicate(self, nums):\n s = sorted(nums)\n dim = len(s)\n counter = 1\n if len(nums) == 0:\n return False\n prevnum = s[0]\n while counter < dim:\n thisnum = s[counter]\n if thisnum == prevnum:\n return True\n counter += 1\n prevnum = thisnum\n return False\n","repo_name":"iammax/leetcode","sub_path":"solutions/py/p0217_contains_duplicate.py","file_name":"p0217_contains_duplicate.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14047578768","text":"#!/usr/bin/env python3\n#Written by Elias Eskelinen\nimport sys\nimport time\n\n\nclass item:\n name = \"\"\n values = []\n links = []\n def __init__(self, name1, values1, links):\n self.name = name1\n self.values = values1\n self.links = links\nsrcfile = \"items.txt\"\nitems = []\n\ndef flatten(all):\n com = []\n for i in all:\n com.append(i.name)\n for x in i.values:\n com.append(x)\n return com\ndef index(all, title):\n ij = 0\n for i in all:\n if len(i.values) > 0:\n for x in i.values:\n if x.lower == title.lower:\n return ij\n ij = ij + 1\n else:\n if i.lower == title.lower:\n return ij\n ij = ij + 1\n raise \"not found lol\"\ndef count(all, title):\n c = 0\n for i in all:\n if i.name == title.lower:\n c = c + 1\n if len(i.values > 0):\n for v in i.values:\n if i.lower == title.lower:\n c = c + 1\n return c\ndef score(link, all, spe=False):\n try:\n ind = index(all, link)\n# ind = all.index(link)\n except Exception as e:\n try:\n ind = index(all, \"https://\" + link)\n# ind = all.index(\"https://\" + link)\n except Exception as e2:\n try:\n ind = index(all, \"http://\" + link)\n# ind = all.index(\"http://\" + link)\n except Exception as e3:\n try:\n ind = index(all, \"http://\" + link + \"/\")\n # ind = all.index(\"http://\" + link + \"/\")\n except Exception as e4:\n try:\n ind = index(all, \"https://\" + link + \"/\")\n# ind = all.index(\"https://\" + link + \"/\")\n except Exception as e5:\n if spe:\n print(\"Target not in index.\")\n return 0\n #score = all.count(str(ind))\n score = count(all, str(ind))\n return score\ndef ref(all):\n f = []\n c = 0\n for i in all:\n if len(i.values) > 0:\n for x in i.values:\n c = c + 1\n try:\n print( int(x) )\n f.append(x)\n except Exception as e:\n e = \"\"\n try:\n print( int(x.name) )\n f.append(x.name)\n except Exception as e:\n e = \"\"\n c = c + 1\n print(str(c) + \" entries prosessed, \" + str(len(f)) + \" matching entries found.\")\n return f\ndef cls():\n print(\"\\033[H\\033[J\")\ndef load(quiet = True):\n global srcfile, items\n items = []\n with open(srcfile, \"r+\") as f:\n f2 = f.readlines()\n name = \"\"\n values = []\n links = []\n tc = 0\n vc = 0\n ttc = 0\n if not quiet:\n print(\"Loading file:\")\n print(\"-------------\")\n for i in f2:\n f3 = i.replace(\"\\n\",\"\")\n #print(f3)\n if len(f3) > 0:\n if f3[0] == \" \":\n ind = 1\n if f3.find(\":-\") == -1:\n ind = 0\n values.append(f3.replace(\" \",\"\").split(\":-\")[0])\n links.append(f3.replace(\" \",\"\").split(\":-\")[ind])\n tc = tc + 1\n else:\n vc = vc + 1\n if name != \"\":\n ttc = ttc + len(values)\n items.append(item(name, values, links))\n name = f3\n values = []\n links = []\n if name != \"\":\n 
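# [Editor's example] The sort-based scan above is O(n log n); comparing the
# size of a set with the input length answers the same question in expected
# O(n) time:
def contains_duplicate(nums):
    return len(set(nums)) != len(nums)

assert contains_duplicate([1, 2, 3, 1]) is True
assert contains_duplicate([]) is False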
items.append(item(name, values, []))\n f.close()\n if not quiet:\n print(str(vc) + \" fields found\")\n print(str(tc) + \" values (tabs) found\")\n print(str(ttc) + \" values recorded \\\"passing by\\\"\")\n print(str(len(items)) + \" items in total\")\n print(\"------------\")\n return items\ndef search(query, items2=[]):\n global items\n if items2 == []:\n items2 = items\n results = []\n inp = []\n done = []\n# for i in items:\n# inp.append(i.name)\n# for x in i.values:\n ind = 0\n al = len(items2)\n for i in items2:\n ind = ind + 1\n print(\"Searching... \" + str(ind) + \" / \" + str(al),end=\"\\r\")\n if i.name.upper().find(query.upper()) != -1 and (i not in done):\n results.append(item(i.name, i.values, []))\n done.append(i)\n for x in i.values:\n v = []\n if x.upper().find(query.upper()) != -1 and (x not in done):\n# results.append(i)\n v.append(x)\n done.append(x)\n if len(v) > 0:\n results.append(item(i.name, v, []))\n print()\n return results\ndef throwIDErr():\n raise ValueError(\"Proper authentication needed as an argument\")\ndef test(times = -1):\n from random import randint\n import time\n items = []\n rounds = times\n if times == -1:\n rounds = int(input(\"How many items?\\n>\"))\n print(\"Testing with \" + str(rounds) + \" rounds...\")\n for i in range(rounds):\n #print(\"--- %s seconds ---\" % (time.time() - start_time),end=\"\\r\")\n name = \"\"\n values = []\n for x in range(randint(3,9)):\n name = name + str(randint(0, 9))\n if randint(0,5) == 3:\n for z in range(randint(2,5)):\n values.append(str(randint(0,200)))\n links = []\n ite = item(name, values, links)\n items.append(ite)\n all = 0\n print()\n #print(\"Made up the items!\")\n start_time = time.time()\n print(\"........--- %s seconds ---\" % (time.time() - start_time),end=\"\\r\")\n for i in range(rounds):\n print(\"--- %s seconds ---\" % int(time.time() - start_time) + \"[\" + str(int(100-(rounds-i)/100)) + \"%] \",end=\"\\r\")\n all = all + len(search(str(i), items))\n print(\"........--- %s seconds ---\" % (time.time() - start_time))\n print(str(all) + \" items found\")\n if times == -1:\n input(\"press enter to continue\\n\")\ndef run(id):\n print(\"Welcome \" + id[\"name\"])\n time.sleep(1)\n load()\n print(str( len(items) ) + \" items loaded:\")\n# for i in items:\n# print(i.name)\n# for x in i.values:\n# print(\" \" + str(x))\n# print(\"You can quit anytime by typing \\\"!q\\\" as the search query\")\n exit = False\n q = \"-------------------------------------------------------------------------\"\n# q = input(\"Search\\n>\")\n while not exit:\n cls()\n print(\"SEARCH\")\n# print(str( len(items) ) + \" items loaded.\")\n print(\"You can quit anytime by typing \\\"!q\\\" as the search query\")\n res = search(q)\n for i in res:\n print(\"Item name: \" + i.name)\n print(\"Item values: \")\n for x in i.values:\n print(\" \" + str(x) + \" : \")\n print(\"---------\")\n print(str(len(res)) + \" results.\")\n q = input(\">\")\n if q == \"!q\":\n exit = True\n if q == \"!test\":\n test()\n if q == \"!t\":\n test(1)\n test(10)\n test(100)\n test(1000)\n test(10000)\n input(\"press enter to continue\")\n if q == \"!s\":\n l = input(\"Term/link: \")\n print(str(l) + \" has a score of \" + str(score(l, items, True)))\n input(\"Press enter to continue.\")\n if q == \"!n\":\n ref(items)\n input(\"Press enter to continue.\")\n if q == \"!f\":\n f = flatten(items)\n print(\"Norm len: \" + str(len(items)) + \", flat len: \" + str(len(f)))\n input(\"Press enter to continue.\")\nif __name__ == \"__main__\":\n 
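# [Editor's sketch] index() and count() above have several bugs: `x.lower ==
# title.lower` compares bound methods instead of calling them, `i.lower` is
# accessed on an item object, `raise "not found lol"` raises a non-exception,
# and count() tests `len(i.values > 0)`.  One reading of the intent, as a
# corrected sketch:
def count_fixed(all_items, title):
    c = 0
    for i in all_items:
        if i.name.lower() == title.lower():
            c += 1
        if len(i.values) > 0:
            c += sum(1 for v in i.values if v.lower() == title.lower())
    return c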
run({\"name\":\"guest\"})\n","repo_name":"xypine/crawler","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6181149980","text":"data = []\nwith open(\"input.txt\", \"r\") as file:\n data = [line.strip() for line in file.readlines()]\n\ndef snafu_to_decimal(snafu):\n decimal = 0\n snafu = snafu[::-1]\n place = 1\n for letter in snafu:\n if letter in \"0123456789\":\n decimal += int(letter) * place\n else:\n decimal += -1 * place if letter == \"-\" else -2 * place\n place *= 5\n return decimal\n\ndef decimal_to_snafu(decimal):\n # Nope that took too long );\n # q = [\"\"]\n # # BFS to find the right snafu because parsing sounds like a headache\n # while q:\n # snafu = q.pop(0)\n # if snafu_to_decimal(snafu) == decimal:\n # return snafu\n # else:\n # for letter in \"-=012\":\n # q.append(snafu + letter)\n\n base_five = []\n while decimal > 0:\n base_five.append(decimal % 5)\n decimal = decimal // 5\n for i, num in enumerate(base_five):\n if num not in [-2, -1, 0, 1, 2]:\n if i == len(base_five) - 1:\n base_five.append(1)\n else:\n base_five[i + 1] += 1\n base_five[i] = num - 5\n base_five.reverse()\n output = \"\"\n for num in base_five:\n if num in [0, 1, 2]:\n output += str(num)\n elif num == -1:\n output += \"-\"\n else:\n output += \"=\"\n return output\n\n \ntotal = sum([snafu_to_decimal(snafu) for snafu in data])\nprint(decimal_to_snafu(total))\n\n\n# AHSUFIHDSUILFHUQWILAHUFILSDHUILHVEWIULVHUILQAGSUIVHUILVIUSGDBILASHCVIUDLSGBVUIELHF\n# I DID IT\n# 50 FREAKING STARS","repo_name":"EricKugel/Advent-Of-Code-2022","sub_path":"Day 25/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"23772719545","text":"from torch.utils.data import Dataset\nfrom copy import deepcopy\nimport numpy as np\n\nimport os\n\nimport cv2\nfrom tqdm import tqdm\n\nimport torch\n\nimport logging\nimport json\n\nfrom SoccerNet.Downloader import getListGames\nfrom SoccerNet.Downloader import SoccerNetDownloader\nfrom SoccerNet.Evaluation.utils import AverageMeter, EVENT_DICTIONARY_V2, INVERSE_EVENT_DICTIONARY_V2\nfrom SoccerNet.Evaluation.utils import EVENT_DICTIONARY_V1, INVERSE_EVENT_DICTIONARY_V1\n\n\n\ndef feats2clip(feats, stride, clip_length, padding = \"replicate_last\", off=0):\n if padding ==\"zeropad\":\n print(\"beforepadding\", feats.shape)\n pad = feats.shape[0] - int(feats.shape[0]/stride)*stride\n print(\"pad need to be\", clip_length-pad)\n m = torch.nn.ZeroPad2d((0, 0, clip_length-pad, 0))\n feats = m(feats)\n print(\"afterpadding\", feats.shape)\n # nn.ZeroPad2d(2)\n\n idx = torch.arange(start=0, end=feats.shape[0]-1, step=stride)\n idxs = []\n for i in torch.arange(-off, clip_length-off):\n idxs.append(idx+i)\n idx = torch.stack(idxs, dim=1)\n\n if padding==\"replicate_last\":\n idx = idx.clamp(0, feats.shape[0]-1)\n # print(idx)\n return feats[idx,...]\n\n\nclass SoccerNetClips(Dataset):\n\n def __init__(self, path, features=\"ResNET_PCA512.npy\", split=[\"train\"], version=2, \n framerate=2, window_size=15, custom_feature_path=\"C:\\\\Users\\\\91995\\\\OneDrive - Georgia Institute of Technology\\\\vit_features\", n = 500):\n self.path = path\n self.listGames = getListGames(split)\n self.features = features\n self.window_size_frame = window_size*framerate\n self.version = version\n self.custom_feature_path=custom_feature_path\n 
if version == 1:\n self.num_classes = 3\n self.labels=\"Labels.json\"\n elif version == 2:\n self.dict_event = EVENT_DICTIONARY_V2\n self.num_classes = 17\n self.labels=\"Labels-v2.json\"\n\n logging.info(\"Checking/Download features and labels locally\")\n downloader = SoccerNetDownloader(path)\n if self.features != \"custom_vit\":\n\n downloader.downloadGames(files=[self.labels, f\"1_{self.features}\", f\"2_{self.features}\"], split=split, verbose=False, randomized=True)\n # vit features link # https://gtvault.sharepoint.com/:f:/s/Alphas/Eo8u3Gc5jslBhBV13wYlTL0BrhwqMPWxgC2CBGM2zqO2cg?e=ldhIO5\n logging.info(\"Pre-compute clips\")\n\n self.game_feats = list()\n self.game_labels = list()\n\n self.game_frames = list()\n\n # game_counter = 0\n for it, game in enumerate(tqdm(self.listGames)):\n if self.features == \"custom_vit\":\n game_feature_path = game.replace(os.path.sep, '_')\n\n feat_half1 = np.load(os.path.join(self.custom_feature_path, f\"{game_feature_path}_1.npy\"))\n feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])\n feat_half2 = np.load(os.path.join(self.custom_feature_path, f\"{game_feature_path}_2.npy\"))\n feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])\n\n\n elif self.features == \"frames\":\n if it>n:break\n game_feature_path = game.replace(os.path.sep, '_')\n feat_half1_path = os.path.join(self.custom_feature_path, game_feature_path + \"_1\")\n feat_half2_path = os.path.join(self.custom_feature_path, game_feature_path + \"_2\")\n frames_1 = os.listdir(feat_half1_path)\n frames_1 = [frame for frame in frames_1 if frame[-4:] == \".jpg\"]\n frames_1 = sorted(frames_1, key = lambda x: int(x[:-4]))\n feat_half1 = []\n for frame in frames_1:\n frame_path = os.path.join(feat_half1_path, frame)\n frame_attr = cv2.imread(frame_path)\n feat_half1.append(frame_attr)\n feat_half1 = torch.tensor(np.asarray(feat_half1)).permute(0, 3, 1, 2).detach().numpy()\n\n frames_2 = os.listdir(feat_half2_path)\n frames_2 = [frame for frame in frames_2 if frame[-4:] == \".jpg\"]\n frames_2 = sorted(frames_2, key = lambda x: int(x[:-4]))\n feat_half2 = []\n for frame in frames_2:\n frame_path = os.path.join(feat_half2_path, frame)\n frame_attr = cv2.imread(frame_path)\n feat_half2.append(frame_attr)\n feat_half2 = torch.tensor(np.asarray(feat_half2)).permute(0, 3, 1, 2).detach().numpy()\n frames_feat_1 = deepcopy(feat_half1)\n frames_feat_2 = deepcopy(feat_half2)\n \n else:\n feat_half1 = np.load(os.path.join(self.path, game, \"1_\" + self.features))\n feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])\n feat_half2 = np.load(os.path.join(self.path, game, \"2_\" + self.features))\n feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])\n\n feat_half1 = feats2clip(torch.from_numpy(feat_half1), stride=self.window_size_frame, clip_length=self.window_size_frame)\n feat_half2 = feats2clip(torch.from_numpy(feat_half2), stride=self.window_size_frame, clip_length=self.window_size_frame)\n\n # Load labels\n labels = json.load(open(os.path.join(self.path, game, self.labels)))\n\n\n \n \n label_half1 = np.zeros((feat_half1.shape[0], self.num_classes+1), dtype=np.float32)\n label_half1[:,0]=1 # those are BG classes\n label_half2 = np.zeros((feat_half2.shape[0], self.num_classes+1), dtype=np.float32)\n\n label_half2[:,0]=1 # those are BG classes\n\n\n for annotation in labels[\"annotations\"]:\n\n time = annotation[\"gameTime\"]\n event = annotation[\"label\"]\n\n half = int(time[0])\n\n minutes = int(time[-5:-3])\n seconds = int(time[-2::])\n frame = framerate * ( seconds + 60 * 
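# [Editor's example] (hypothetical values) How a gameTime string such as
# "1 - 05:30" maps to a frame index at framerate = 2, mirroring the slicing
# used above:
time_str, framerate = "1 - 05:30", 2
half = int(time_str[0])                                  # 1
minutes, seconds = int(time_str[-5:-3]), int(time_str[-2:])
frame = framerate * (seconds + 60 * minutes)
print(half, frame)                                       # 1 660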
minutes ) \n\n if version == 1:\n if \"card\" in event: label = 0\n elif \"subs\" in event: label = 1\n elif \"soccer\" in event: label = 2\n else: continue\n elif version == 2:\n if event not in self.dict_event:\n continue\n label = self.dict_event[event]\n\n # if label outside temporal of view\n if half == 1 and frame//self.window_size_frame>=label_half1.shape[0]:\n continue\n if half == 2 and frame//self.window_size_frame>=label_half2.shape[0]:\n continue\n\n if half == 1:\n label_half1[frame//self.window_size_frame][0] = 0 # not BG anymore\n label_half1[frame//self.window_size_frame][label+1] = 1 # that's my class\n\n if half == 2:\n label_half2[frame//self.window_size_frame][0] = 0 # not BG anymore\n label_half2[frame//self.window_size_frame][label+1] = 1 # that's my class\n\n self.game_feats.append(feat_half1)\n self.game_feats.append(feat_half2)\n self.game_labels.append(label_half1)\n self.game_labels.append(label_half2)\n feat_half1 = None\n feat_half2 = None\n label_half1 = None\n label_half2 = None\n\n if self.features == \"frames\":\n self.game_frames.append(frames_feat_1)\n self.game_frames.append(frames_feat_2)\n frames_feat_1 = None\n frames_feat_2 = None\n\n self.game_feats = np.concatenate(self.game_feats)\n self.game_labels = np.concatenate(self.game_labels)\n if self.features == \"frames\":\n self.game_frames = np.concatenate(self.game_frames)\n self.game_feats = None\n\n\n\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n clip_feat (np.array): clip of features.\n clip_labels (np.array): clip of labels for the segmentation.\n clip_targets (np.array): clip of targets for the spotting.\n \"\"\"\n\n if self.features == \"frames\":\n return self.game_frames[index, :, :, :], self.game_labels[index//(self.window_size_frame), :]\n else:\n return self.game_feats[index,:,:], self.game_labels[index,:]\n\n def __len__(self):\n if self.features == \"frames\":\n return len(self.game_frames)\n else:\n return len(self.game_feats)\n\n\n\nclass SoccerNetClipsTesting(Dataset):\n def __init__(self, path, features=\"ResNET_PCA512.npy\", split=[\"test\"], version=1, \n framerate=2, window_size=15, custom_feature_path=\"C:\\\\Users\\\\91995\\\\OneDrive - Georgia Institute of Technology\\\\vit_features\"):\n self.path = path\n self.listGames = getListGames(split)\n self.features = features\n self.window_size_frame = window_size*framerate\n self.framerate = framerate\n self.version = version\n self.split=split\n self.custom_feature_path=custom_feature_path\n if version == 1:\n self.dict_event = EVENT_DICTIONARY_V1\n self.num_classes = 3\n self.labels=\"Labels.json\"\n elif version == 2:\n self.dict_event = EVENT_DICTIONARY_V2\n self.num_classes = 17\n self.labels=\"Labels-v2.json\"\n\n logging.info(\"Checking/Download features and labels locally\")\n downloader = SoccerNetDownloader(path)\n if self.features != \"custom_vit\":\n for s in split:\n if s == \"challenge\":\n downloader.downloadGames(files=[f\"1_{self.features}\", f\"2_{self.features}\"], split=[s], verbose=False,randomized=True)\n else:\n downloader.downloadGames(files=[self.labels, f\"1_{self.features}\", f\"2_{self.features}\"], split=[s], verbose=False,randomized=True)\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n feat_half1 (np.array): features for the 1st half.\n feat_half2 (np.array): features for the 2nd half.\n label_half1 (np.array): labels (one-hot) for the 1st half.\n label_half2 (np.array): labels (one-hot) for the 2nd half.\n \"\"\"\n # Load 
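# [Editor's example] What feats2clip does during training: with stride equal
# to clip_length it cuts non-overlapping windows, and "replicate_last" padding
# clamps out-of-range indices to the final frame.
import torch
feats = torch.arange(10).unsqueeze(1).float()            # 10 frames, 1-d feature
clips = feats2clip(feats, stride=4, clip_length=4)
print(clips.shape)                                       # torch.Size([3, 4, 1])
print(clips[-1].squeeze())                               # tensor([8., 9., 9., 9.])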
features\n if self.features == \"custom_vit\":\n\n game_feature_path = self.listGames[index].replace(os.path.sep, '_')\n\n feat_half1 = np.load(os.path.join(self.custom_feature_path, f\"{game_feature_path}_1.npy\"))\n feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])\n feat_half2 = np.load(os.path.join(self.custom_feature_path, f\"{game_feature_path}_2.npy\"))\n feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])\n else:\n feat_half1 = np.load(os.path.join(self.path, self.listGames[index], \"1_\" + self.features))\n feat_half1 = feat_half1.reshape(-1, feat_half1.shape[-1])\n feat_half2 = np.load(os.path.join(self.path, self.listGames[index], \"2_\" + self.features))\n feat_half2 = feat_half2.reshape(-1, feat_half2.shape[-1])\n\n\n # Load labels\n label_half1 = np.zeros((feat_half1.shape[0], self.num_classes))\n label_half2 = np.zeros((feat_half2.shape[0], self.num_classes))\n\n # check if annoation exists\n if os.path.exists(os.path.join(self.path, self.listGames[index], self.labels)):\n labels = json.load(open(os.path.join(self.path, self.listGames[index], self.labels)))\n\n for annotation in labels[\"annotations\"]:\n\n time = annotation[\"gameTime\"]\n event = annotation[\"label\"]\n\n half = int(time[0])\n\n minutes = int(time[-5:-3])\n seconds = int(time[-2::])\n frame = self.framerate * ( seconds + 60 * minutes ) \n\n if self.version == 1:\n if \"card\" in event: label = 0\n elif \"subs\" in event: label = 1\n elif \"soccer\" in event: label = 2\n else: continue\n elif self.version == 2:\n if event not in self.dict_event:\n continue\n label = self.dict_event[event]\n\n value = 1\n if \"visibility\" in annotation.keys():\n if annotation[\"visibility\"] == \"not shown\":\n value = -1\n\n if half == 1:\n frame = min(frame, feat_half1.shape[0]-1)\n label_half1[frame][label] = value\n\n if half == 2:\n frame = min(frame, feat_half2.shape[0]-1)\n label_half2[frame][label] = value\n\n \n \n\n feat_half1 = feats2clip(torch.from_numpy(feat_half1), \n stride=1, off=int(self.window_size_frame/2), \n clip_length=self.window_size_frame)\n\n feat_half2 = feats2clip(torch.from_numpy(feat_half2), \n stride=1, off=int(self.window_size_frame/2), \n clip_length=self.window_size_frame)\n\n \n return self.listGames[index], feat_half1, feat_half2, label_half1, label_half2\n\n def __len__(self):\n return len(self.listGames)\n\n","repo_name":"gucifer/Football-Action-Recognition","sub_path":"training_modules/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":13513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37670475589","text":"#%% Secondo\nimport numpy as np\nimport pandas as pd\n# import matplotlib.pyplot as plt\nimport sklearn.model_selection as ms\nimport sklearn.metrics as mt\nimport xgboost as xgb\n# from sklearn.ensemble import GradientBoostingClassifier as xgb\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.base import TransformerMixin\n\nfrom sklearn.naive_bayes import MultinomialNB\nclf = MultinomialNB()\n\n#%% Read data section\nfileTrain = pd.read_csv(\"loan_clean.csv\", sep =\",\")\n\nprint(fileTrain[\"loan_status\"].value_counts())\n\n# fileTrain = fileTrain.drop_duplicates(subset='ID')\n\nnew = [u'loan_amnt', u'term', u'int_rate', u'installment', u'emp_length',\n u'home_ownership', u'annual_inc', u'verification_status', u'purpose', u'zip_code', u'addr_state', u'dti',\n u'delinq_2yrs', u'inq_last_6mths', 
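# [Editor's example] In the testing loader feats2clip is called with stride=1
# and off=window/2, producing one clip per frame, centred on it and clamped at
# the boundaries:
import torch
feats = torch.arange(6).unsqueeze(1).float()
clips = feats2clip(feats, stride=1, clip_length=4, off=2)
print(clips[0].squeeze())                                # tensor([0., 0., 0., 1.])
print(clips[3].squeeze())                                # tensor([1., 2., 3., 4.])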
u'mths_since_last_delinq',\n u'mths_since_last_record', u'open_acc', u'pub_rec', u'revol_bal',\n u'revol_util', u'total_acc', u'initial_list_status',\n u'mths_since_last_major_derog', u'last_first_credit_diff']\nnew2 = [u'term', u'int_rate', u'emp_length', u'purpose', u'initial_list_status']\n\nlabel = fileTrain.pop(u'loan_status')\ndf_clean_norm = (fileTrain-fileTrain.mean())/(fileTrain.max()-fileTrain.min())\n\nX_train, X_test, y_train, y_test = ms.train_test_split(df_clean_norm[new],label, test_size = 0.3)\nprint(fileTrain.columns)\n#%%\n\nnum_round = 50\ndtrain = xgb.DMatrix(X_train, label=y_train)\ndtest = xgb.DMatrix(X_test, label=y_test)\n\nevallist = [(dtest,'eval'), (dtrain,'train')]\nparam = {'objective':'multi:softmax','base_score':0.5, 'silent':1, 'eval_metric': ['ndcg'],'eta':0.3,'gamma':30,'max_depth':7,\n 'min_child_weight':20,'lambda':1,'alpha':0.5,'scale_pos_weight':1,'updater':'grow_local_histmaker,prune' }\n\n\nparam1 = {'objective':'binary:logistic','base_score':0.5, 'silent':1, 'eval_metric': ['ndcg'],'eta':0.5,'gamma':25,'max_depth':7,\n 'min_child_weight':1,'lambda':0.5,'alpha':0.2,'scale_pos_weight':1,'updater':'grow_local_histmaker,prune' }\n\nbst = xgb.train( param1, dtrain, num_round, evallist )\n# bst = MultinomialNB(alpha=0.1)\n# bst.fit(X_train, y_train)\n\ny_predProb1 = bst.predict(dtest)\n#%%\nthreshold = 0.20\ny_pred1 = y_predProb1[:].copy()\ny_pred1[y_pred1> threshold] = 1\ny_pred1[y_pred1<= threshold] = 0\n\n# y_pred1 = bst.predict(X_test)\n\n\nprint(mt.f1_score(y_test,y_pred1))\nprint(mt.accuracy_score(y_test,y_pred1))\nprint(mt.confusion_matrix(y_test,y_pred1))\n","repo_name":"marcomiglionico94/CS-412-Final-Project","sub_path":"cs412-2017-project-master/code/xgb-ml.py","file_name":"xgb-ml.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14253196913","text":"from scipy import signal as signal\nimport numpy as np\nimport csv\n\nimport matplotlib\n\nimport matplotlib.pyplot as plt\n\nInputfilepath = \"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files\"\ncutoff = 10.0\nframerate = 1200.0\n\ncolors = ['b', 'g', 'k', 'm', 'c', 'y']\ncolors2 = ['c', 'y', '.5', 'r']\nmarkers = ['*', 'o']\n\n\nw = float(cutoff/ (framerate/2.0))\nheader_start = 27\n\ninput_force_plate_arr = []\ninput_force_plate_arr.append(\"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files/Force_Plate_Data/WheelForcePlate0007_f_1.tsv\")\ninput_force_plate_arr.append(\"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files/Force_Plate_Data/WheelForcePlate0007_f_2.tsv\")\ninput_force_plate_arr.append(\"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files/Force_Plate_Data/WheelForcePlate0007_f_3.tsv\")\ninput_force_plate_arr.append(\"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files/Force_Plate_Data/WheelForcePlate0007_f_4.tsv\")\ninput_force_plate_arr.append(\"/Users/jackieallex/Downloads/Mocap-Cyr-Wheel/input_tsv_files/Force_Plate_Data/WheelForcePlate0007_f_5.tsv\")\n\n'''\ndef create_data_arr_force_plate(frame, plate_id):\n current_row = force_file[plate_id][frame + 27]\n cols, rows = (3, 3)\n arr = [[None]*cols for _ in range(rows)]\n count = 0\n count_row = 0\n for x in range(2, 11):\n if count == 3:\n count = 0\n count_row += 1\n arr[count_row][count] = current_row[x]\n count += 1\n return arr\n'''\n\n#an array holding all force files info\nforce_file = []\n#hold force plate data for one frame\nforce_plate_arr = []\n#array holding arrays of 
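# [Editor's sketch] Rather than hard-coding threshold = 0.20, the F1-optimal
# cut can be read off the precision-recall curve (mt is sklearn.metrics,
# imported above; y_predProb1 are the predicted probabilities):
import numpy as np
prec, rec, thr = mt.precision_recall_curve(y_test, y_predProb1)
f1 = 2 * prec * rec / np.clip(prec + rec, 1e-12, None)
best = int(np.argmax(f1[:-1]))
print("best threshold:", thr[best], "F1:", f1[best])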
each force plate's X_corner_pos, X_corner_neg, Neg_x_neg, pos_x_neg\nforce_plate_positions = []\n\nfor x in range(len(input_force_plate_arr)):\n with open(input_force_plate_arr[x], \"r\") as tsv_file:\n force_file_temp = list(csv.reader(tsv_file, delimiter='\\t'))\n new_temp_row = []\n new_temp_full = []\n for n in range(len(force_file_temp)):\n if n >= 27:\n counter = 0\n temp_arr = []\n for m in range(len(force_file_temp[n])):\n if force_file_temp[n][m] is not \"\" and m>=2:\n if(counter < 2):\n temp_arr.append(float(force_file_temp[n][m]))\n counter += 1\n if(counter == 2):\n counter = 0\n temp_arr.append(float(force_file_temp[n][m]))\n new_temp_row.append(temp_arr)\n temp_arr = []\n new_temp_full.append(new_temp_row)\n new_temp_row = []\n #print(new_temp_full)\n filtData = np.array(new_temp_full)\n\n #plot original data\n plt.plot(filtData[:, 0, 1], markers[1], color = colors[x])\n plt.xlabel('frame')\n plt.ylabel('force')\n\n plt.savefig(Inputfilepath + '/unfiltered' + str(x) + '.png')\n amount_of_points = len(new_temp_full[0])\n\n for ii in range(amount_of_points):\n for kk in range(3):\n #print(w)\n b, a = signal.butter(4, w, 'low')\n #import pdb; pdb.set_trace()\n filtData[:,ii, kk] = signal.filtfilt(b, a, filtData[:,ii,kk])\n #plot filtered\n plt.plot(filtData[:, 0, 1], markers[1], color = colors[x])\n plt.xlabel('frame')\n plt.ylabel('force')\n plt.savefig(Inputfilepath + '/filtered' + str(x) + '.png')\n np.save(Inputfilepath + '/FiltFP_' + str(x) + '.npy', filtData)\n \nprint(force_file)\n","repo_name":"HuMoN-Research-Lab/Mocap-Cyr-Wheel","sub_path":"script_parts/butterworth.py","file_name":"butterworth.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34623391383","text":"#!/usr/bin/env python3\n\n# Created by Amir Mersad\n# Created on September 2019\n# This program accepts users with age between 25 and 40 for dating\n\n\ndef main():\n # This function accepts users with age between 25 and 40 for dating\n\n # Input\n age_str = input(\"Please enter you age: \")\n\n # Process and Output\n try:\n age = int(age_str)\n if age > 25 and age < 40:\n print(\"You are accepted to date the girl and do other stuff\")\n elif age < 25:\n print(\"You are young, come back later when you are older\")\n else:\n print(\"You are too old!\")\n except Exception:\n print(\"Wrong input!!!\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"amir-mersad/ICS3U-Unit3-07-Python","sub_path":"date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71397403921","text":"'''\nDescripttion: \nversion: \nAuthor: nlpir team\nDate: 2020-08-07 20:46:23\nLastEditors: Please set LastEditors\nLastEditTime: 2021-05-14 14:35:59\n'''\nimport sys, os\nsys.path.insert(0, os.getcwd())\nfrom corrector_dict.detector_dict import DetectorDict\nfrom utils.text_utils import get_correct_text\nfrom utils.text_utils import split_2_short_text, traditional2simplified, _check_contain_error\nfrom collections import Iterable\n\nclass CorrectorDict(DetectorDict):\n def __init__(self):\n super(CorrectorDict, self).__init__()\n self.name = 'corrector_dict'\n\n def add_dict(self, words, separators='-'):\n \"\"\"增加混淆集对\n\n 传入的参数可以是:一个词语、一个列表、一个元组、甚至是一个文件地址,文件地址里面是包含一行一个词语\n\n 格式1: 只增加一个词\n\n 格式2:增加一个列表\n\n :param words: 可以传列表、文件地址、或者字符串,如果字符串包含separators,则默认为传入混淆集对,\n 比如:度假-渡假\n\n :param separators: 
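# [Editor's example] The zero-phase low-pass used above, on a synthetic
# signal; w = cutoff / (framerate / 2) expresses the cutoff as a fraction of
# Nyquist.  (Aside: the parsing loop above tests `is not ""`, an identity
# comparison - `!= ""` is the safe spelling.)
import numpy as np
from scipy import signal
fs, cutoff = 1200.0, 10.0
t = np.arange(0, 1, 1 / fs)
noisy = np.sin(2 * np.pi * 2 * t) + 0.3 * np.sin(2 * np.pi * 120 * t)
b, a = signal.butter(4, cutoff / (fs / 2), 'low')
print(signal.filtfilt(b, a, noisy).std())     # 120 Hz component attenuated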
地址分割符,比如:度假-渡假、分割符是:-\n \"\"\"\n if isinstance(words, str):\n words = words.strip() # 去除空格\n if os.path.exists(words): # 判断是否存在该文件\n with open(words, encoding='UTF-8')as fp:\n for word in fp:\n self.add_dict(word, separators)\n elif separators in words: # 判断是否带有分割符\n self.common_confusion.update({words.split(separators)[0]:words.split(separators)[1]})\n else: # 纯字符串\n raise Exception('未指定混淆集的分隔符')\n elif isinstance(words, Iterable): # 迭代器\n for word in words:\n self.add_dict(word, separators)\n \n def correct_dict(self, text, include_symbol=True):\n '''\n Descripttion: 句子改错\n param text\n return 改正后的句子\n '''\n details = []\n blocks = split_2_short_text(text, include_symbol=include_symbol)\n for blk, idx in blocks:\n details = self.correct_dict_short(blk, start_idx=idx, details=details)\n return details\n\n def correct_dict_short(self, sentence, start_idx=0, details=[]):\n detector_words = self.detect_dict_short(sentence)\n for item, begin_idx, end_idx in detector_words:\n if item in self.confusions:\n detail_word = [item, self.confusions[item], begin_idx + start_idx, end_idx + start_idx,'dict']\n details.append(detail_word)\n\n tokens = self.tokenizer.tokenize(sentence)\n for word, begin_idx, end_idx in tokens:\n if _check_contain_error([word, begin_idx + start_idx, end_idx + start_idx], details):\n continue\n word_simplified = traditional2simplified(word)\n if word_simplified != word:\n # 繁化简错误\n detail_word = [word, word_simplified, begin_idx + start_idx, end_idx + start_idx, 'traditional']\n details.append(detail_word)\n \n return details\n\n \nif __name__ == '__main__':\n correct = CorrectorDict()\n err_sentences = [\n # '记者从全国总工会今天召开的2021年“五一”新闻发布会上获悉,憂郁的臺灣烏龜',\n '一带一路於网路',\n '您真是一位好心的维权人士,疆独,六四,装逼,国家粮食局',\n '中国民主党一位的维权人士,疆独',\n '中国民主党',\n '根据中国互联网络信息中心(China Internet Network Information Center,简称CNNIC)于2015年2月发布的第35次',\n '于网路上传播'\n ]\n for err_sent in err_sentences:\n pred_detail = correct.correct_dict(err_sent)\n pred_sent = get_correct_text(err_sent, pred_detail)\n print(pred_sent, pred_detail)\n","repo_name":"cabbageNoob/ESCorrector","sub_path":"corrector_dict/corrector_dict.py","file_name":"corrector_dict.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"3155061380","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'bomberMan' function below.\n#\n# The function is expected to return a STRING_ARRAY.\n# The function accepts following parameters:\n# 1. INTEGER n\n# 2. 
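# [Editor's usage sketch] (hypothetical confusion pairs) add_dict accepts a
# single "wrong-right" pair, an iterable of pairs, or a path to a file with
# one pair per line.  Note it writes self.common_confusion while
# correct_dict_short reads self.confusions - presumably the same mapping on
# DetectorDict, but worth verifying.
corrector = CorrectorDict()
corrector.add_dict('度假-渡假')                       # one pair, default '-'
corrector.add_dict(['网路-网络', '於-于'], separators='-')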
STRING_ARRAY grid\n#\n\ndef bomberMan(n, grid):\n # Write your code here\n if n == 0:\n return grid\n \n list_grid = []\n for x in range(len(grid)):\n new_line = []\n for y in range(len(grid[x])):\n new_line.append(0)\n list_grid.append(new_line)\n \n \n for x in range(len(grid)):\n for y in range(len(grid[x])):\n if grid[x][y] == '.': \n list_grid[x][y] = -1\n elif grid[x][y] == 'O': \n list_grid[x][y] = 3\n \n for i in range(1, n + 1):\n for x in range(len(list_grid)): ## Timer count\n for y in range(len(list_grid[x])):\n list_grid[x][y] -= 1 \n \n \n for x in range(len(list_grid)): ## Bombs detonation\n for y in range(len(list_grid[x])):\n if list_grid[x][y] == 0:\n if x != 0: \n if list_grid[x - 1][y] != 0:\n list_grid[x - 1][y] = -1\n if x != len(list_grid) - 1:\n if list_grid[x + 1][y] != 0:\n list_grid[x + 1][y] = -1\n if y != 0: \n if list_grid[x][y - 1] != 0:\n list_grid[x][y - 1] = -1\n if y != len(list_grid[x]) - 1:\n if list_grid[x][y + 1] != 0:\n list_grid[x][y + 1] = -1\n \n if i % 2 == 0: #Bomberman is planting bombs\n for x in range(len(list_grid)):\n for y in range(len(list_grid[x])):\n if list_grid[x][y] <= 0:\n list_grid[x][y] = 3\n \n if i == n:\n break\n if i % 4 == (n % 4) + 0:\n if i != 1:\n break \n \n result = [] \n for x in range(len(grid)):\n new_line = str()\n for y in range(len(grid[x])):\n if list_grid[x][y] <= 0:\n new_line += '.'\n elif list_grid[x][y] > 0:\n new_line += 'O'\n result.append(new_line)\n \n \n return result\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n first_multiple_input = input().rstrip().split()\n\n r = int(first_multiple_input[0])\n\n c = int(first_multiple_input[1])\n\n n = int(first_multiple_input[2])\n\n grid = []\n\n for _ in range(r):\n grid_item = input()\n grid.append(grid_item)\n\n result = bomberMan(n, grid)\n\n fptr.write('\\n'.join(result))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"IgnatIvanov/HackerRank","sub_path":"3 Months Preparation Kit/Week 07/The Bomberman Game/The Bomberman Game.py","file_name":"The Bomberman Game.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24537190728","text":"import unittest\n\nimport torch\n\nfrom autocare_dlt.core.model.head import RetinaNetHead\n\n\nclass TestRetinaNetHead(unittest.TestCase):\n def setUp(self):\n self.num_classes = 10\n self.dummy_cfg = dict(\n in_channels=256,\n num_classes=self.num_classes,\n aspect_ratio=[0.5, 1.0, 2.0],\n anchor_size=[32, 64, 128, 256, 512],\n topk_candidates=1000,\n )\n self.dummy_input = [\n torch.rand(2, 256, 64, 64),\n torch.rand(2, 256, 32, 32),\n torch.rand(2, 256, 16, 16),\n torch.rand(2, 256, 8, 8),\n torch.rand(2, 256, 4, 4),\n ]\n self.dummy_labels = [\n {\"boxes\": torch.rand(3, 4), \"labels\": torch.tensor([3, 6, 1])},\n {\"boxes\": torch.rand(1, 4), \"labels\": torch.tensor([0])},\n ]\n self.img_size = [512, 512]\n\n def tearDown(self):\n pass\n\n def test_build_head(self):\n head = RetinaNetHead(**self.dummy_cfg)\n with self.assertRaises(ValueError):\n wrong_in_channels = dict(in_channels=256.0, num_classes=10)\n RetinaNetHead(**wrong_in_channels)\n\n def test_run_head(self):\n head = RetinaNetHead(**self.dummy_cfg)\n head.train()\n pred = head(self.dummy_input, self.img_size)\n self.assertIsInstance(pred, dict)\n self.assertEqual(len(pred), 3)\n\n head.eval()\n res_infer = head(self.dummy_input, self.img_size)\n self.assertEqual(len(res_infer), 3)\n\n num_cand = 
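# [Editor's note] After the first detonation the grid becomes periodic with
# period 4, which the `i % 4 == n % 4` early exit above exploits; a quick
# sanity check on a toy grid (rows of equal width):
g = [".......", "...O...", "....O..", ".......", "OO.....", "OO....."]
assert bomberMan(5, g) == bomberMan(9, g) == bomberMan(13, g)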
self.dummy_cfg[\"topk_candidates\"] * len(\n self.dummy_cfg[\"anchor_size\"]\n )\n for r in res_infer:\n self.assertEqual(r.size()[1], num_cand)\n self.assertEqual(len(res_infer[0].size()), 3)\n self.assertEqual(len(res_infer[1].size()), 2)\n self.assertEqual(len(res_infer[2].size()), 2)\n","repo_name":"snuailab/autocare_dlt","sub_path":"tests/core/model/head/test_retinanet_head.py","file_name":"test_retinanet_head.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"71556057042","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 11:10:35 2020\n@author: zhaizhengyuan\n全量的离线计算与增量的离线计算\n用于每天凌晨5点的定时全量更新\n以及每隔15(线上30)分钟的增量更新\n\"\"\"\nimport multiprocessing\nimport traceback\nimport datetime\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n# from utils.config import ModifyConfFile, log_logger\nfrom hotshop_offline_com.get_shop_list import HotShopRead\nfrom hotshop_offline_com.com_owners_hotshop import OwnerShop, save_owner_shops, add_time\nfrom hotshop_offline_com.find_new_owner import FindNewOwner\n\n\n# logger = log_logger()\n# mcf = ModifyConfFile(logger)\n# two_mysql = mcf.return_argv()\n\n\nclass IniAndUpdate(object):\n \"\"\"\n 初始化更新类:\n 包含初始化全量计算模块,\n 定时增量更新计算模块,\n 以及定时任务模块。\n \"\"\"\n def __init__(self, two_mysql1, logger1):\n # 根据环境修改配置文件,并实例化该类\n self.logger = logger1\n self.two_mysql = two_mysql1\n hsr = HotShopRead(self.two_mysql, self.logger)\n self.shop_list = hsr.sort_shop_list\n self.begin_time = None\n self.end_time = None\n # self.start()\n\n def full_init(self):\n self.logger.info('-----开始离线初始化全量计算------')\n print(\"start full_init at {}------------------------\".format(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n try:\n os1 = OwnerShop(self.two_mysql, self.logger, self.shop_list, [])\n owner_shop_dict1 = os1.total_owner_score_dict()\n save_data1 = owner_shop_dict1.items()\n save_data = add_time(save_data1)\n save_owner_shops(self.two_mysql, self.logger, save_data)\n except Exception as e:\n self.logger.error('初始化全量计算异常:{}'.format(e), traceback.print_exc())\n else:\n self.logger.info('全量计算完成,数据更新到表cb_hotshop_owner_rec_shops')\n\n def incre_update(self):\n self.end_time = datetime.datetime.now()\n self.begin_time = self.end_time + datetime.timedelta(minutes=-15)\n self.logger.info('-----开始离线增量计算------')\n print(\"start incre_update at {}------------------------\".format(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n begin_time = self.begin_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n end_time = self.end_time.strftime(\"%Y-%m-%d %H:%M:%S\")\n try:\n new_owner_list = FindNewOwner(self.two_mysql, self.logger,\n begin_time, end_time).new_owner_list\n if new_owner_list:\n os2 = OwnerShop(self.two_mysql, self.logger, self.shop_list, new_owner_list)\n owner_shop_dict2 = os2.total_owner_score_dict()\n save_data2 = owner_shop_dict2.items()\n save_data = add_time(save_data2)\n save_owner_shops(self.two_mysql, self.logger, save_data)\n print(\"{} new owner is updated------------------------\".format(\n len(new_owner_list)))\n else:\n self.logger.info(\"no new owner is found\")\n except Exception as e:\n self.logger.error('增量计算异常:{}'.format(e), traceback.print_exc())\n else:\n self.logger.info('增量计算完成,数据更新到表cb_hotshop_owner_rec_shops')\n\n @staticmethod\n def scheduler_task(fun, trigger='interval', hour=5, minute=10):\n scheduler = BlockingScheduler()\n # 采用阻塞的方式\n # 采用固定时间(cron)的方式,每天在固定时间执行\n scheduler.add_job(fun, trigger=trigger, 
hour=hour, minute=minute)\n # scheduler.add_job(fun, trigger='cron', hour=5, minute=10)\n # scheduler.add_job(self.incre_update(), trigger='interval', minutes=15)\n scheduler.start()\n\n def task1(self):\n scheduler = BlockingScheduler()\n # 采用阻塞的方式\n # 采用固定时间(cron)的方式,每天在固定时间执行\n scheduler.add_job(self.full_init, trigger='cron', hour=5, minute=20)\n scheduler.start()\n\n def task2(self):\n scheduler = BlockingScheduler()\n # 采用阻塞的方式\n # 采用固定时间(cron)的方式,每天在固定时间执行\n scheduler.add_job(self.incre_update, trigger='interval', minutes=2)\n scheduler.start()\n\n # def start(self):\n # multiprocessing.Process(target=self.task1).start()\n # multiprocessing.Process(target=self.task2).start()\n\n\ndef start(two_mysql, logger):\n inj = IniAndUpdate(two_mysql, logger)\n inj.full_init()\n multiprocessing.Process(target=inj.task1).start()\n multiprocessing.Process(target=inj.task2).start()\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"xiejunbiao/cloudbrain-recommend","sub_path":"offline_com/hotshop_offline_com/ini_and_update.py","file_name":"ini_and_update.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6439537841","text":"class BaseSchema:\n \"\"\"Base class for all schemas.\"\"\"\n\n _headers = None\n\n def __init__(self):\n attributes_formatted = \"\"\n\n for attr_name, attr_value in vars(self).items():\n if attr_value is None:\n continue\n\n if isinstance(attr_value, dict):\n attributes_formatted += f\"{attr_name}={{{'...' if attr_value else ''}}}, \"\n elif isinstance(attr_value, list):\n attributes_formatted += f\"{attr_name}=[{'...' if attr_value else ''}], \"\n elif attr_value and type(attr_value).__name__[0].isupper():\n attributes_formatted += f\"{attr_name}={type(attr_value).__name__}(...), \"\n else:\n attributes_formatted += f\"{attr_name}={attr_value!r}, \"\n\n self._attributes_formatted = attributes_formatted[:-2]\n\n def __eq__(self, other):\n return vars(self) == vars(other)\n\n def __repr__(self): # pragma: no cover\n return f\"{type(self).__name__}({self._attributes_formatted})\"\n\n @classmethod\n def add_headers(cls, headers: dict) -> None:\n \"\"\"\n Adds headers for subclasses' uses when sending requests (GET/POST).\n\n Parameters:\n headers: A dictionary that is in the format of {\"X-TBA-Auth-Key\": api_key} for TBA be able to authorize sending requests.\n \"\"\"\n cls._headers = headers\n","repo_name":"Shom770/data.frc","sub_path":"src/schemas/base_schema.py","file_name":"base_schema.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14694696011","text":"#!/usr/bin/env python\n\nimport sys\nimport requests\nimport os \nfrom colorama import Fore, Style\nimport socket\nfrom cymruwhois import Client\nfrom pyfiglet import *\nimport argparse\n\ncustom_fig = Figlet(font='graffiti')\nauthor = Figlet(font='graceful')\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', action='store', dest='url',\n \n help='Target url')\nparser.add_argument('--version', action='version', version='%(prog)s 1.0')\n\nresults = parser.parse_args()\n\ndef cookie_check():\n\n print(Fore.BLUE + \"Observables\\n\" + Style.RESET_ALL)\n\n try:\n a_session = requests.Session()\n a_session.get('https://' + results.url)\n session_cookies = a_session.cookies\n cookies_dictionary = session_cookies.get_dict()\n print(cookies_dictionary)\n\n except requests.exceptions.ConnectionError:\n 
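# [Editor's sketch] BlockingScheduler.start() blocks its process, which is why
# task1/task2 above run in separate multiprocessing.Process workers; a single
# BackgroundScheduler could host both jobs in one process instead:
from apscheduler.schedulers.background import BackgroundScheduler
sched = BackgroundScheduler()
sched.add_job(print, trigger='cron', hour=5, minute=20, args=['full refresh'])
sched.add_job(print, trigger='interval', minutes=2, args=['incremental'])
sched.start()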
print(\"Cookie: Cookie details could not be processed.\")\n\n except TypeError:\n print(\"NO URL PROVIDED\")\n\ndef enumeration(): \n \n print(Fore.BLUE + \"Target Enumeration\\n\" + Style.RESET_ALL)\n os.system('gobuster dir -q -u' + results.url + '/ -w wordlist.txt -r -a \"Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.48 Mobile Safari/537.36\" --timeout 10s')\n\n while True: \n enumeration_continue = input(\"Would you like to continue? \")\n if enumeration_continue == 'Yes' or enumeration_continue == 'yes':\n enumeration_new = input(\"Enter the directory name: \")\n os.system('gobuster dir -q -u' + results.url + '/' + enumeration_new + '/ -w wordlist.txt -r -a \"Mozilla/5.0 (Linux; Android 12) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.48 Mobile Safari/537.36\" --timeout 10s')\n break\n else: \n continue\n\ndef whois_ip():\n \n try: \n domainip = socket.gethostbyname(results.url)\n c = Client()\n r = c.lookup(domainip)\n print(Fore.BLUE + \"Site Information\\n\" + Style.RESET_ALL)\n print(\"ASN: \" + r.asn)\n print(\"ASN Owner: \" + r.owner)\n print(\"Host IP: \" + r.ip)\n except TypeError:\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\"\\nNO URL PROVIDED.\\n\")\n sys.exit()\n \nwhile True: \n \n os.system('cls' if os.name == 'nt' else 'clear')\n print(Fore.CYAN + custom_fig.renderText(\"ConPhisher\") + Style.RESET_ALL)\n print(Fore.CYAN + (\"❌ This tool is not intended for malicious purposes ❌\") + Style.RESET_ALL)\n print(Fore.CYAN + (\"👉 github.com/raid-sailor\") + Style.RESET_ALL)\n os.system('cls' if os.name == 'nt' else 'clear')\n print(Fore.CYAN + custom_fig.renderText(\"ConPhisher\") + Style.RESET_ALL)\n print(Fore.CYAN + (\"❌ This tool is not intended for malicious purposes ❌\") + Style.RESET_ALL)\n print(Fore.CYAN + (\"👉 github.com/raid-sailor\") + Style.RESET_ALL)\n print(\"\\n====================================================\\n\")\n cookie_check()\n print(\"\\n====================================================\\n\")\n whois_ip()\n print(\"\\n====================================================\\n\")\n input(\"Press enter to continue to enumeration\")\n print(\"\\n====================================================\\n\")\n enumeration()\n sys.exit()\n\n\n","repo_name":"raid-sailor/ConPhisher","sub_path":"main-script/conphisher.py","file_name":"conphisher.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16770227295","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom . 
models import Student\nfrom django.views.generic.edit import CreateView,UpdateView,DeleteView\nfrom django.views.generic import ListView,DetailView\nfrom django.contrib import messages\nfrom urllib import request\n\n\n# Create your views here.\n\n\ndef student_home(request):\n return render(request,'student_home.html')\n\nclass add_student(CreateView):\n model=Student\n template_name='add_student.html'\n fields=['name','email','phone','rollno','gender','department','address','password','image']\n def form_valid(self,form):\n instance=form.save(commit=False)\n #print(\"svrjfjabybfs\")\n instance.save()\n messages.success(self.request,'your contact added sucessfully')\n return redirect('lecturer_home')\n\n\nclass view_student(ListView):\n template_name='view_lecturer_students.html'\n model=Student\n context_object_name='students'\n\n def get_queryset(self):\n # lid=self.request.session.get('id_')\n # print(lid)\n students=super().get_queryset()\n return students\n\n# def view_student(request,id):\n# context = {\n# model:Student\n# context_object_name:'students'\n# 'contact':get_object_or_404(Contact,pk=id)\n# }\n# return render(request,'view_student.html',context)\n\n\ndef view(request):\n lid=request.session.get('id_')\n dept=request.session.get('dept_')\n print(lid)\n print(dept)\n context={\n 'l_id':lid,\n 'l_dept':dept\n }\n return render(request,'empty.html',context)\n\n\nclass view_student_detail(DetailView):\n template_name='view_student_detail.html'\n model=Student\n context_object_name='student'\n print('details ')\n\n\ndef view_student_profile(request):\n id=request.session.get('id_')\n stud=Student.objects.get(pk=id)\n context={\n 'stu':stud\n }\n return render(request,'view_student_profile.html',context)\n\n\n\n# class view_student_profile(DetailView):\n# template_name='view_student_profile.html'\n# model=Student\n# context_object_name='student'\n# print('details ')\n\n# class delete_student(DeleteView):\n# model=Student\n# template_name='delete_student.html'\n# success_url='/'\n\n# def delete(self,request,*args,**kwargs):\n# print('delete')\n# messages.success(self.request,'Your Lecturer has been saved succesfully deleted!')\n# return super().delete(self,request,*args,**kwargs)\n\ndef delete_student(request,pk=None):\n if pk:\n one_task = Student.objects.get(id = pk)\n one_task.delete()\n return render(request,'lecturer_home.html')\n\n\n\ndef student_login(request):\n \n # username = password = ''\n if request.method==\"POST\":\n print()\n username = request.POST['username']\n password = request.POST['password']\n print(username)\n print(password)\n # projection = ['Administration.administration_administration.*']\n # sql = \"\"\"SELECT * FROM administration_administration WHERE email=\"\"\"+username\n # print(sql)\n for p in Student.objects.raw('SELECT * FROM students_student'):\n if(p.email==username):\n if(p.password==password):\n request.session['roolno_']=p.rollno\n request.session['name_']=p.name\n request.session['id_']=p.id\n request.session['dept_']=p.department\n print(request.session.get('dept_'))\n return redirect('student_home')\n return render(request,'registration/student_login.html',{})\n\nclass update_student(UpdateView):\n model = Student\n template_name = 'update_student.html'\n fields = ['name','email','phone','info','gender','image','password','address']\n success_url = '/'\n\n def form_valid(self,form):\n instance = form.save()\n messages.success(self.request,'Your Contact has been saved succesfully updated!')\n return redirect('student_home')\n\ndef 
update(request):\n lid=request.session.get('id_')\n dept=request.session.get('dept_')\n\n context={\n 'l_id':lid,\n 'l_dept':dept\n }\n return render(request,'empty.html',context)\n\ndef feepayment(request):\n return render(request,'feepayment.html',{})\n\ndef view_department_students(request,pk=None):\n if pk==1:\n dept = 'IT'\n for p in Student.objects.raw('SELECT * FROM students_student'):\n if(p.department==dept):\n context={\n 'student':p\n }\n return render(request,'view_department_students.html',context)\n if pk==2:\n dept = 'CSE'\n for p1 in Student.objects.raw('SELECT * FROM students_student'):\n if(p1.department==dept):\n context={\n 'student':p1\n }\n return render(request,'view_department_students.html',context)\n if pk==3:\n dept = 'ECE'\n for p2 in Student.objects.raw('SELECT * FROM students_student'):\n if(p2.department==dept):\n context={\n 'student':p2\n }\n return render(request,'view_department_students.html',context)\n if pk==4:\n dept = 'EEE'\n for p3 in Student.objects.raw('SELECT * FROM students_student'):\n if(p3.department==dept):\n context={\n 'student':p3\n }\n return render(request,'view_department_students.html',context)\n\n","repo_name":"18031J0014/django_ums","sub_path":"students/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15145480740","text":"# -*- encoding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport copy\n\nfrom opensearchsdk.base import Base\nfrom opensearchsdk.core.httputil import HttpConnection\nfrom opensearchsdk import log\nfrom opensearchsdk.config import Config\nfrom opensearchsdk.authentication import SignatureBuilder\n\n\n__author__ = 'barycenter'\n\ndef singleton(cls, *args, **kw):\n \"\"\"\n singleton mode to create client\n :param cls:\n :param args:\n :param kw:\n :return:\n \"\"\"\n instances = {}\n\n def _singleton(*args, **kw):\n if cls not in instances:\n instances[cls] = cls(*args, **kw)\n return instances[cls]\n return _singleton\n\n@singleton\nclass OpenSearchClient(Base):\n\n def __init__(self, zone, url_type, access_id, access_key, proxy_host=None, proxy_port='80'):\n self.__zone = zone\n self.__zone_url = Config.get_zone_url(zone, url_type)\n self.__access_id = access_id\n self.__access_key = access_key\n self.__proxy_host = proxy_host\n self.__proxy_port = proxy_port\n\n def build_signature(self, params, method='GET'):\n\n params_send = copy.deepcopy(params)\n builder = SignatureBuilder(params_send)\n params_send = builder.build_signature(access_key_id=self.__access_id,\n secret_key=self.__access_key,\n request_method=method)\n return params_send\n\n def get_connection(self):\n \"\"\"\n create a http connection\n :return:\n \"\"\"\n return HttpConnection(proxy_host=self.__proxy_host, proxy_port=self.__proxy_port)\n\n def send_message(self, url, method='GET', params=dict(), files=dict()):\n conn = self.get_connection()\n params_send = self.build_signature(params, method)\n url = self.__zone_url + url\n conn.build_request(url, params=params_send, files=files, method=method)\n resp = conn.execute_request()\n return resp\n\n\nclass OpensearchClientFactory(Base):\n @classmethod\n def create_client(cls, zone, url_type, access_id, access_key, proxy_host=None, proxy_port='80'):\n return OpenSearchClient(zone, url_type, access_id, access_key, proxy_host, 
proxy_port)\n\n\n","repo_name":"rozhao2/opensearch-sdk","sub_path":"opensearchsdk/core/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"8251274290","text":"from lib import DotCollection\nfrom lib import algorithm\nfrom lib import visualizer\nimport subprocess\n\nif __name__ == \"__main__\":\n ### Input ###\n valid = False\n while (not(valid)) :\n n = int(input(\"Masukkan banyak titik (n): \"))\n nDim = int(input(\"Masukkan dimensi titik (nDim): \"))\n if (n <= 1 or nDim < 1) :\n print(\"Masukan tidak valid. Pastikan banyak titik > 1 dan dimensi titik >= 1\")\n else :\n valid = True\n\n ### Process ###\n\n ## Generating Dots ##\n listOfDotDnC = DotCollection.DotCollection(n, nDim)\n # algorithm.sortArrOfDot(listOfDotDnC.getArrOfDot())\n listOfDotBF = DotCollection.DotCollection()\n listOfDotDnC.copy(listOfDotBF)\n ## Calculate Shortest Distance, time, etc ##\n # Divide and Conquer\n algorithm.divideAndConquerShortestDistance(listOfDotDnC)\n # Brute Force\n algorithm.bruteForceShortestDistance(listOfDotBF)\n\n ### Output ###\n \n getModel = subprocess.Popen(['wmic', 'computersystem', 'get', 'model'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n model = getModel.stdout.read().decode()\n choice = input(\"Simpan output ke file ? Y/N\\n\")\n if (choice == 'Y') :\n fileName = input(\"Masukkan nama file : \")\n f = open(fileName, \"w\", encoding=\"utf-8\")\n # Divide and Conquer\n f.write(\"Divide and Conquer\\n\")\n f.write(\"Closest Points: (\" + str(listOfDotDnC.getClosestPoints()\n [0].getCoordinate()) + \",\" + str(listOfDotDnC.getClosestPoints()[1].getCoordinate())+ \")\\n\")\n f.write(\"Distance: \"+ str(listOfDotDnC.getShortestDistance()) + \"\\n\")\n\n # Brute Force\n f.write(\"Brute Force\\n\")\n f.write(\"Closest Points: (\"+ str(listOfDotBF.getClosestPoints()\n [0].getCoordinate())+ \",\" + str(listOfDotBF.getClosestPoints()[1].getCoordinate())+ \")\\n\")\n f.write(\"Distance: \"+ str(listOfDotBF.getShortestDistance()) + \"\\n\")\n\n ## Banyak Operasi Perhitungan Rumus Euclidean ##\n # Divide and Conquer\n f.write(\"N perhitungan\\n\")\n f.write(\"Divide and Conquer: \"+ str(listOfDotDnC.getNStep()) + \"\\n\")\n\n # Brute Force\n f.write(\"Brute Force: \"+ str(listOfDotBF.getNStep()) + \"\\n\")\n\n ## Execution Time (Spesifikasikan komputer yang digunakan) ##\n # Divide and Conquer\n f.write(\"Execution Time\\n\")\n f.write(\"Divide and Conquer: \"+ str(listOfDotDnC.getSolvingTime())+ \"s\\n\")\n\n # Brute Force\n f.write(\"Brute Force: \"+ str(listOfDotBF.getSolvingTime())+ \"s\\n\")\n # Komputer yang digunakan\n f.write(\"Computer \")\n f.write(model)\n f.close()\n\n ## Sepasang Titik Terdekat dan Jaraknya ##\n # Divide and Conquer\n print(\"Divide and Conquer\")\n print(\"Closest Points: \", \"(\", listOfDotDnC.getClosestPoints()\n [0].getCoordinate(), \",\", listOfDotDnC.getClosestPoints()[1].getCoordinate(), \")\")\n print(\"Distance: \", listOfDotDnC.getShortestDistance())\n\n # Brute Force\n print(\"Brute Force\")\n print(\"Closest Points: \", \"(\", listOfDotBF.getClosestPoints()\n [0].getCoordinate(), \",\", listOfDotBF.getClosestPoints()[1].getCoordinate(), \")\")\n print(\"Distance: \", listOfDotBF.getShortestDistance())\n\n ## Banyak Operasi Perhitungan Rumus Euclidean ##\n # Divide and Conquer\n print(\"N perhitungan\")\n print(\"Divide and Conquer: \", listOfDotDnC.getNStep())\n\n # Brute Force\n print(\"Brute Force: \", 
listOfDotBF.getNStep())\n\n ## Execution Time (Spesifikasikan komputer yang digunakan) ##\n # Divide and Conquer\n print(\"Execution Time\")\n print(\"Divide and Conquer: \", listOfDotDnC.getSolvingTime(), \"s\")\n\n # Brute Force\n print(\"Brute Force: \", listOfDotBF.getSolvingTime(), \"s\")\n\n # Komputer yang digunakan\n print(\"Computer\", end=\" \")\n print(model)\n\n ## Visualisasi titik ##\n if (nDim == 3): # 3D\n visualizer.show3D(listOfDotBF)\n elif (nDim == 2): # 2D\n visualizer.show2D(listOfDotBF)\n elif (nDim == 1): # 1D\n visualizer.show1D(listOfDotBF)\n else:\n print(\"Kumpulan titik tidak dapat divisualisasikan\")\n","repo_name":"Mehmed13/Tucil2_13521066_13521172","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38210533284","text":"from django.urls import path\nfrom .views import LoginView, RegisterUsers, UpdateMentor, Invoice\n\n\nurlpatterns = [\n path('auth/login/', LoginView.as_view(), name=\"auth-login\"),\n path('auth/register/', RegisterUsers.as_view(), name=\"auth-register\"),\n path('mentors//', UpdateMentor.as_view(), name=\"update-mentor\"),\n path('mentors/invoice/', Invoice.as_view(), name=\"mentor-invoice\")\n]\n","repo_name":"tonymontaro/mentorci","sub_path":"mentor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"19587098451","text":"# service file for scheduler_api app\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\nfrom . import models\nfrom . import serializers\n\n\nclass SchedulerService:\n\n def schedule_list(self, interviewer_id, candidate_id):\n \"\"\"\n Get the schedule list based on inputs\n :param interviewer_id: interviewer email\n :param candidate_id: candidate email\n :return: time-slots as list\n \"\"\"\n interviewer_details = candidate_details = []\n if interviewer_id:\n interviewer_details = self.get_data(email_id=interviewer_id)\n if candidate_id:\n candidate_details = self.get_data(email_id=candidate_id)\n return Response(\n {\n 'status': 200,\n 'message': 'Details fetched successfully',\n 'interviewer_details': interviewer_details,\n 'candidate_details': candidate_details,\n },\n status=status.HTTP_200_OK\n )\n\n def get_data(self, email_id):\n \"\"\"\n get the user data based on email id\n :param email_id: user mail id\n :return: return user data\n \"\"\"\n if models.User.objects.filter(email=email_id).exists():\n user_obj = models.User.objects.get(email=email_id)\n return serializers.UserScheduleSerializer(user_obj).data\n","repo_name":"safeersayed/scheduler","sub_path":"scheduler_api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4268600243","text":"#! /usr/bin/env python\n# Authors: Alden Chen, Birinder Singh\n# Date: 2018/11/23\n# This script takes in the titanic dataset and splits it into\n# training and testing sets to be used in a decision tree model.\n# The sets are saved to the data folder.\n# It also uses the training set to do cross validation to find\n# The optimal depth tree. 
A plot of the validation performance is\n# also saved to results folder.\n# usage: python src/hyperparameter-tuning.py \"data/titanic_data.csv\" \"data\" \"results\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport os\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import train_test_split\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input_file')\nparser.add_argument('output_folder')\nparser.add_argument('results')\nargs = parser.parse_args()\n\ndef main():\n # load data\n data = args.input_file\n titanic_data = pd.read_csv(data)\n \n # Split data X and y\n features = titanic_data[[\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\",\n \"Fare\", \"Embarked\" ]]\n \n# feature_labels = np.reshape( features.columns.values, \n# len(features.columns.values), )\n \n \n target = titanic_data[[\"Survived\"]]\n \n X = features.values\n y = np.reshape(target.values, len(target.values), )\n \n # Get training set and test set\n X_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size = 0.2,\n random_state = 1)\n \n # initialize array to store validation performance data\n accuracy = []\n \n # 5-fold cross validation\n depths = range(1, 21)\n for i in depths:\n tree = DecisionTreeClassifier(max_depth = i)\n tree.fit(X_train, y_train)\n accuracy.append(np.mean(cross_val_score(tree, X_train, y_train, cv = 5)))\n \n \n \n #plot of validation performance\n plt.plot(depths, accuracy)\n plt.xlabel(\"Tree Depth\")\n plt.ylabel(\"Average Accuracy\")\n plt.title(\"Validation Performance\")\n \n \n # save testing and training data to data\n \n np.savetxt(args.output_folder + \"/\" + \"X_train.txt\", X_train)\n np.savetxt(args.output_folder + \"/\" + \"X_test.txt\", X_test)\n np.savetxt(args.output_folder + \"/\" + \"y_train.txt\", y_train)\n np.savetxt(args.output_folder + \"/\" + \"y_test.txt\", y_test)\n \n # save plot of validation performance to results\n plt.savefig(args.results + \"/\" + \"validation_5-fold-performance-plot.png\", format = \"png\")\n \n print(\"Best depth:\", np.argmax(accuracy) + 1)\n \nif __name__ == \"__main__\":\n main()\n \n","repo_name":"UBC-MDS/DSCI-522_Titanic-Survival-Prediction","sub_path":"src/hyperparameter-tuning.py","file_name":"hyperparameter-tuning.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71618811601","text":"# Instalação da lib do Selenium\n# pip3 install selenium\n\n# Instalação Beautifulsoup4\n# pip3 install beautifulsoup4\n\n# Instalação lxml\n# pip3 install lxml\n\n# ABRINDO O QUADRO DE NOTAS\n# Verificar o Quadro de Notas aggregatesum\n# linkQuadroNotas = \"https://ead.ifrn.edu.br/ava/academico/grade/edit/tree/index.php?id=639\" #+ codCurso #\"639\"\n# navegador.get(url=linkQuadroNotas)\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox import options\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.firefox.service import Service as FirefoxService\nfrom selenium.webdriver.support.ui import Select\n\nfrom time import sleep\n\nservice = FirefoxService(\n executable_path=\"./geckodriver\",\n)\n\noptions = Options()\noptions.headless = False # executar de forma visível ou oculta\n\n\ndef validarBotao(navegador, link, contBotao, notaMinima):\n checagemTotal = 0\n contarBotao = 0\n # print(\"Nota mínima: %s\" % notaMinima)\n # 
print(\"botão\")\n # ENTRANDO NO LIVRO\n # EXPANDINDO TUDO\n print(\"Analisando as configurações do Botão - Obter certificado\")\n navegador.find_element(by=By.LINK_TEXT, value=\"Expandir tudo\").click() # Abrir\n sleep(1)\n # GERAL - DESCRIÇÃO\n checagemTotal += 1\n input_GeralDescricao = False\n inputGeralDescricao = navegador.find_element(\n by=By.ID, value=\"id_introeditoreditable\"\n )\n # print(inputGeralDescricao.text)\n if inputGeralDescricao.text == '{GENERICO:type=\"certificate\"}':\n input_GeralDescricao = True\n else:\n print(\"ATENÇÃO: EXISTEM 2 BOTÕES, OBSERVAR QUAL O QUE IRÁ GERAR O CERTIFICADO\")\n contarBotao += 1\n\n # CONFIGURAÇÕES COMUNS DE MÓDULOS - DISPONIBILIDADE\n checagemTotal += 1\n input_ConfComumModuloDisponibilidade = False\n inputConfComumModuloDisponibilidade = Select(\n navegador.find_element(by=By.ID, value=\"id_visible\")\n )\n if (\n navegador.find_element(by=By.ID, value=\"id_visible\").get_attribute(\"value\")\n != \"1\"\n ): # 1 É PADRÃO(MOSTRAR NA PÁGINA DO CURSO)\n inputConfComumModuloDisponibilidade.select_by_value(\n \"1\"\n ) # 1 É PADRÃO(MOSTRAR NA PÁGINA DO CURSO)\n input_ConfComumModuloDisponibilidade = True\n contarBotao += 1\n # sleep(1)\n\n # RESTRIÇÃO ACESSO - ESTUDANTE\n botaoCertificadoEscolhaEstudante = navegador.find_element(\n By.XPATH,\n \"//select[@class='availability-neg custom-select mx-1' and @title='Tipo de restrição']\",\n )\n todasOpcoes = botaoCertificadoEscolhaEstudante.find_elements(By.TAG_NAME, \"option\")\n for opcao in todasOpcoes:\n # print(\"Valores são: %s\" % opcao.get_attribute(\"value\"))\n # print(opcao.text)\n if opcao.text == \"deve\":\n # print(\"O valor é: %s\" % opcao.get_attribute(\"value\"))\n opcao.click()\n # contarBotao+=1\n # sleep(1)\n\n # RESTRIÇÃO ACESSO - NOTA\n botaoCertificadoEscolhaNota = navegador.find_element(\n By.XPATH, \"//select[@class='custom-select' and @name='id']\"\n )\n todasOpcoes = botaoCertificadoEscolhaNota.find_elements(By.TAG_NAME, \"option\")\n for opcao in todasOpcoes:\n # print(\"Valores são: %s\" % opcao.get_attribute(\"value\"))\n # print(opcao.text)\n if opcao.text == \"Total do curso\":\n # print(\"O valor é: %s\" % opcao.get_attribute(\"value\"))\n opcao.click()\n # contarBotao+=1\n # sleep(1)\n\n # RESTRIÇÃO ACESSO - NOTA MÍNIMA SELECIONAR A OPÇÃO\n input_botaoCertificadoNotaMinima = False\n inputbotaoCertificadoNotaMinima = navegador.find_element(\n By.XPATH, \"//input[@class='form-check-input mx-1' and @name='min']\"\n )\n # todasOpcoes = botaoCertificadoNotaMinima.find_elements(By.TAG_NAME,\"option\")\n if (\n inputbotaoCertificadoNotaMinima.is_selected() == False\n ): # PADRÃO É MARCADO! 
SE ESTIVER DESMARCADO, ENTRE E MARQUE\n        # print(\"clicando em nota mínima\")\n        inputbotaoCertificadoNotaMinima.click()\n        navegador.find_element(\n            By.XPATH, \"//input[@class='form-control mx-1' and @name='minval']\"\n        ).clear()\n        navegador.find_element(\n            By.XPATH, \"//input[@class='form-control mx-1' and @name='minval']\"\n        ).send_keys(notaMinima)\n        input_botaoCertificadoNotaMinima = True\n        # input_botaoCertificadoInserirNotaMinima = True\n        contarBotao += 1\n        # sleep(1)\n    # TESTAR PARA PEGAR SEMPRE A NOTA DA SEÇÃO\n    # else:\n    #     navegador.find_element(By.XPATH, \"//input[@class='form-control mx-1' and @name='minval']\").clear()\n    #     navegador.find_element(By.XPATH, \"//input[@class='form-control mx-1' and @name='minval']\").send_keys(notaMinima)\n\n    # RESTRINGIR ACESSO - NÃO SERÁ TRATADO\n\n    # CONCLUSÃO DE ATIVIDADE\n    # CONCLUSÃO DE ATIVIDADE - ACOMPANHAMENTO DE CONCLUSÃO\n    checagemTotal += 1\n    input_ConclusaoAtividadeAcompanhamento = False\n    inputConclusaoAtividadeAcompanhamento = Select(\n        navegador.find_element(by=By.ID, value=\"id_completion\")\n    )\n    if (\n        navegador.find_element(by=By.ID, value=\"id_completion\").get_attribute(\"value\")\n        != \"0\"\n    ):  # 0 É PADRÃO(NÃO INDICAR A CONCLUSÃO DE ATIVIDADE)\n        inputConclusaoAtividadeAcompanhamento.select_by_value(\n            \"0\"\n        )  # 0 É PADRÃO(NÃO INDICAR A CONCLUSÃO DE ATIVIDADE)\n        input_ConclusaoAtividadeAcompanhamento = True\n        contarBotao += 1\n    # sleep(1)\n\n    # CONCLUSÃO DE ATIVIDADE - CONCLUSÃO ESPERADA EM\n    # checagemTotal+=1\n    # input_ConclusaoAtividadeConclusaoEsperadaEm = False\n    # inputConclusaoAtividadeConclusaoEsperadaEm = navegador.find_element(by=By.ID,value=\"id_completionexpected_enabled\")\n    # if navegador.find_element(by=By.ID,value=\"id_completionexpected_enabled\").is_selected(): #PADRÃO É DESMARCADO! 
SE ESTIVER MARCADO, ENTRE E DESMARQUE\n # inputConclusaoAtividadeConclusaoEsperadaEm.click()\n # input_ConclusaoAtividadeConclusaoEsperadaEm = True\n # contarBotao+=1\n # sleep(1)\n\n # sleep(2)\n # CLICAR NO BOTÃO DE SALVAR\n navegador.find_element(by=By.ID, value=\"id_submitbutton2\").click()\n sleep(1)\n\n print(\"Total de verificações: %i \" % checagemTotal)\n if contarBotao > 0:\n if contarBotao < 2:\n print(\"Total de modificação: %i\" % contarBotao)\n else:\n print(\"Totais de modificações: %i\" % contarBotao)\n\n if input_GeralDescricao == False:\n print(\n '\"Texto do rótulo\" foi alterado para o Padrão - PADRÃO ({GENERICO:type=\"certificate\"}).'\n )\n\n if input_ConfComumModuloDisponibilidade != False:\n print(\n \"'Disponibilidade' foi alterado para o Padrão - PADRÃO (MOSTRAR NA PÁGINA DO CURSO).\"\n )\n # if input_ConfComumModuloNumIdentificacao != False:\n # print(\"'Número de identificação do módulo' foi alterado para o Padrão - PADRÃO ('VAZIO').\")\n\n if input_botaoCertificadoNotaMinima != False:\n print(\n \"' Restrições de acesso ' foi alterado para o Padrão - PADRÃO (DEVE - TOTAL DO CURSO - NOTA MÍNIMA).\"\n )\n\n if input_ConclusaoAtividadeAcompanhamento != False:\n print(\n \"'Acompanhamento de conclusão' foi alterado para o Padrão - PADRÃO (NÃO INDICAR A CONCLUSÃO DE ATIVIDADE).\"\n )\n\n # if input_ConclusaoAtividadeConclusaoEsperadaEm != False:\n # print(\"'Conclusão esperada em' foi alterado para o Padrão - PADRÃO ('DESMARCADO').\")\n else:\n print(\"Análise do Botão foi concluída sem observações!\")\n return contBotao\n","repo_name":"fabianofaustino13/moodlebot_down_atv","sub_path":"base/validaAtvBotao.py","file_name":"validaAtvBotao.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18865339097","text":"import random\n\ndef train_serenade(horn_blown):\n # List of poetic phrases for the train's serenade\n poetic_phrases = [\n \"Oh, Aurelia, my love in the twilight's embrace\",\n \"We dance through the stars, entwined in time's chase\",\n \"In the celestial ballet, our wheels weave and spin\",\n \"Across the cosmos, our hearts leap and sing\",\n \"Through the nebula's veil, our spirits take flight\",\n \"In a symphony of steel, we glide through the night\",\n \"With every mile traveled, our love's tale unfolds\",\n \"Like stardust and moonbeams, our story is told\",\n ]\n\n # Randomly select poetic phrases for the serenade\n random.shuffle(poetic_phrases)\n serenade_description = \"\\n\".join(poetic_phrases)\n\n # Add a closing phrase about the beauty of their connection\n serenade_description += \"\\n\\nIn this cosmic dance, our souls entwine, and our story echoes through time.\"\n\n # Check if Aurelia has blown her horn during the serenade\n if horn_blown:\n # If the horn was blown, a straw hat appears on one of her staff's heads\n staff_names = [\"Cassandra\", \"Lysander\", \"Seraphina\", \"Caius\", \"Aria\"]\n hat_staff = random.choice(staff_names)\n serenade_description += f\"\\n\\nAs the serenade concludes, a gentle breeze brings a straw hat to rest on {hat_staff}'s head.\"\n\n else:\n # If only the song is sung, a shooting star appears in the distance\n serenade_description += \"\\n\\nIn the distance, a shooting star streaks through the sky, where gravity pulls at the edge of a rainbow.\"\n\n # Return the description of the train's serenade to Aurelia\n return 
serenade_description\n","repo_name":"txtatech/virtual-forest","sub_path":"virtual-forest/game-code/def train_serenade.py","file_name":"def train_serenade.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1466959862","text":"from nadobka import Nadobka as nad\nfrom nadobka import Dutinka as dut\nfrom nadobka import Rameno as ram\n\n# Uzivatel zada cestu ku korenovemu adresaru projektu aj s nazvom projektu\n# Napr C:\\VAULTPRO_MCTEST\\DWI\\45\\Crown\nseznam_slozek = [\n \"Sablony naradi\",\n \"Stahovaci krouzky\", \n \"Chytaky\", \n \"Vodici pouzdra\", \n \"Navadeci krouzky\", \n \"Drzaky chytaku\",\n \"Sroubove cepy\",\n \"Trny\",\n \"Pruziny\",\n \"Navrhove soubory\",\n ]\n\nseznam_naradi = [\n \"Stahovaci krouzky\", \n \"Chytaky\", \n \"Vodici pouzdra\", \n \"Navadeci krouzky\", \n \"Drzaky chytaku\",\n \"Sroubove cepy\",\n \"Trny\",\n \"Pruziny\",\n ]\n\n# Inicializacia nadobky, dutinky a ramena\nnadobka = nad(\n input(\"Nazov projektu: \"),\n float((input(\"Priemer nadobky: \"))),\n float(input(\"Vyska nadobky: \")),\n int(input(\"Tlaková špecifikácia: \")),\n float(input(\"Hrubka steny kominku: \")),\n )\n\ndutinka = dut(\n float(input(\"Prumer dutinky: \")),\n float(input(\"Tloustka steny dutinky: \")),\n int(input(\"Vyska dutinky: \")),\n )\n\nrameno = ram(\n float(input(\"Priemer nadobky vo vyske ramena: \")),\n float(input(\"Tloustka steny vo vyske ramena: \")),\n float(input(\"Uhol ramena: \")),\n input(\"Tvar ramena: \"),\n int(input(\"Pocet tahov: \")),\n )\n\n#strom(seznam_slozek)","repo_name":"Stuler/Tooling-Design","sub_path":"main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29051581007","text":"# -*- coding: utf-8 -*-\nimport datetime\nfrom decimal import Decimal as D\n\nfrom balance import balance_steps as steps\nfrom btestlib import utils\nfrom temp.igogor.balance_objects import Contexts\n\nto_iso = utils.Date.date_to_iso_format\ndt_delta = utils.Date.dt_delta\n\nNOW = datetime.datetime.now()\nNOW_ISO = to_iso(NOW)\nHALF_YEAR_AFTER_NOW_ISO = to_iso(NOW + datetime.timedelta(days=180))\nHALF_YEAR_BEFORE_NOW_ISO = to_iso(NOW - datetime.timedelta(days=180))\nORDER_DT = NOW\nINVOICE_DT = NOW\nCOMPLETIONS_DT = NOW\nACT_DT = NOW\n\ncontext = Contexts.DIRECT_MONEY_RUB_CONTEXT\nQTY = D('250')\nCOMPLETIONS = D('100')\n\n# Создаём клиента\n# client_id = 10\n# client_id = steps.ClientSteps.create(params={'login': 'test_login'})\n#\n# # Привязать клиента к логину\n# # steps.ClientSteps.link(client_id, 'natabers')\n#\n# # Создаём плательщика\n# person_params = {}\n# person_id = None or steps.PersonSteps.create(client_id, context.person_type.code, person_params)\n#\n# # Создаём договор:\n# contract_id, _ = steps.ContractSteps.create_contract('opt_agency_prem_post',\n# {'CLIENT_ID': client_id,\n# 'PERSON_ID': person_id,\n# 'FIRM': '1',\n# 'CURRENCY': '810', # RUR\n# 'BANK_DETAILS_ID': '7627', # АО Юникредит Банк\n# 'MANAGER_CODE': '28133', # Яковенко Екатерина Сергеевна\n# 'MANAGER_BO_CODE': '30726', # Артельная Анна Витальевна\n# 'DT': to_iso(datetime.datetime(2020, 03, 01)),\n# 'FINISH_DT': to_iso(datetime.datetime(2021, 03, 01)),\n# 'UNILATERAL': '1',\n# 'TICKETS': 'BALANCEDUTY-207',\n# 'IS_SIGNED': to_iso(datetime.datetime(2020, 03, 01)),\n# 'SERVICES': [Services.GEO.id],\n# # 'PRINT_TEMPLATE': 
'/sales/processing/Billing-agreements/YandexGSAP/opt/premium/2/',\n# 'WHOLESALE_AGENT_PREMIUM_AWARDS_SCALE_TYPE': '2', # Премиум 2015\n# 'CREDIT_TYPE': '1', # по сроку\n# 'PAYMENT_TERM': '45', # 45 дней\n# 'CALC_DEFERMANT': '0', # от даты акта\n# # # 'COMMISSION_TYPE': 48,\n# # # 'NON_RESIDENT_CLIENTS': 0,\n# # # 'DEAL_PASSPORT': '2015-12-01T00:00:00',\n# # 'REPAYMENT_ON_CONSUME': 0,\n# # 'PERSONAL_ACCOUNT': 1,\n# # 'LIFT_CREDIT_ON_PAYMENT': 0,\n# # 'PERSONAL_ACCOUNT_FICTIVE': 1,\n# 'CREDIT_LIMIT_SINGLE': '1158648553',\n# })\n\n# Создаём доп.соглашение:\nsteps.ContractSteps.create_collateral(\n 2160,\n {\n 'contract2_id': 6269590,\n 'dt': '2021-03-01T00:00:00',\n },\n)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/temp/natabers/contracts.py","file_name":"contracts.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11581263111","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef extract_csv_gen_plot(csv_path):\n\n# data = pd.read_csv(csv_path, header=None)\n data = pd.read_csv(csv_path,float_precision='round_trip',header=None)\n data = data.iloc[:,2]\n df2 = pd.DataFrame(index=range(625),columns=range(625),dtype=np.float64)\n df2 = df2.iloc[1:]\n #df2.index = df2.index + 1\n\n\n idx = 0\n\n for i in range(1, 625):\n for n in range(1,625):\n# print(i,n,data[idx])\n df2[n][i] = data[idx]\n idx = idx + 1\n\n print(df2)\n# data = data.drop(data.columns[[0]], axis=1)\n# print(data)\n# df2.index.names = ['Code Region']\n g = sns.heatmap(df2,cmap=\"rocket_r\")\n# g = sns.heatmap(data,cmap=\"PiYG\",annot=True, vmin = 0.7, vmax = 1.0)\n g.set_yticklabels(g.get_yticklabels(), rotation=0)\n g.set_title('Heatmap')\n# plt.tight_layout()\n# plt.show()\n g.figure.savefig(\"heatmap.png\")\n\n\nextract_csv_gen_plot(\"cosine_similarity_DRB.csv\")\n","repo_name":"HPC-FAIR/DRBClone","sub_path":"DRB-cosineSim/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10564663529","text":"import requests,random,time\nfrom lxml import etree\n\nclass ShangBiao_img():\n\n def open_json(self):\n with open('搜索结果1.json','r',encoding='utf-8')as f:\n item=f.read()\n item=eval(item)\n print(item)\n dict=item['rows']\n print(dict)\n for data in dict:\n print(data)\n self.page_no=data[\"page_no\"]\n print('page_no=========',self.page_no)\n def get_id(self):\n session=requests.session()\n self.headers = {\"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Cookie\":'',# cookie\n \"Host\": \"sbgg.saic.gov.cn:9080\",\n \"Origin\": \"http://sbgg.saic.gov.cn:9080\",\n \"Referer\": \"http://sbgg.saic.gov.cn:9080/tmann/annInfoView/annSearch.html?annNum=1641\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\", }\n data={'annNum':'1642',\n 'annTypecode':'TMZCSQ'\n }\n url='http://sbgg.saic.gov.cn:9080/tmann/annInfoView/selectInfoidBycode.html'\n self.id=session.post(url=url,headers=self.headers,data=data).text\n print('id============',self.id)\n def 
get_img(self):\n        url=\"http://sbgg.saic.gov.cn:9080/tmann/annInfoView/imageView.html\"\n        data={'id':self.id,\n              'pageNum':'1',\n              'flag':'1'\n        }\n        img_json=requests.post(url=url,headers=self.headers,data=data).json()\n        print(img_json)\n        image = img_json[\"imaglist\"][3]\n        print(image)\n    def run(self):\n        self.open_json()\n        self.get_id()\n        self.get_img()\nShangBiao_img().run()\n\n\n\n","repo_name":"hl1227/study_notes","sub_path":"0000.py","file_name":"0000.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40269545356","text":"# -*- coding: utf-8 -*-\n\ndef index():\n    return dict(message=T('Hello World'))\n\n\ndef projects():\n    #COLUMNS=('project.name','project.author','project.repo','project.license')\n    FIELDS=(db.project.id,db.project.name,db.project.created_by,db.project.manager,db.project.phase,db.project.repo)\n    LINKS=[lambda row: A('Subprojects',_href=URL('projects',args=row.id)),\n           lambda row: A('Issues',_href=URL('issues',args=row.id)),\n           lambda row: A('Team',_href=URL('teams',args=row.id)) ]\n    def check(row): return ((row.created_by == auth.user_id)|(row.manager == auth.user_id))\n    if (request.args(0)):\n        query = (db.project.super_project==request.args(0))\n        #name = 'The subprojects of: '+ str(db(db.project.id==request.args(0)).select(db.project.name)).lstrip('project.name ')\n    else:\n        query = db.project\n        #name = 'Project directory'\n    grid = SQLFORM.grid(query,editable=check,deletable=check,\n                        fields = FIELDS,links=LINKS)\n    return dict(grid=grid)#name=name)\n    \ndef teams():\n    def check(row): \n        return (row.team_lead == auth.user_id)\n    if (request.args(0)):\n        query = (db.team.assigned_projects==request.args(0))\n    else:\n        query = db.team\n    grid=SQLFORM.grid(query,editable=check,deletable=check)\n    return dict(grid=grid)\n    \n    \n@auth.requires_membership('manager')\ndef roles():\n    manager_id = db(db.auth_group.role == 'manager').select().first().id\n    query = (db.auth_membership.group_id == manager_id)\n    grid = SQLFORM.grid(query,editable=False)\n    return dict(grid=grid)\n    \n\ndef issues():\n    project = db.project(request.args(0)) or redirect(URL('projects'))\n    status = request.args(2)\n    #TODO- show issues of the subprojects\n    query = (db.issue.project == project.id)&(db.issue.is_last==True)\n    if (request.args(1)):\n        query = query&(db.issue.super_issue==request.args(1))\n    if not status or status=='Open':\n        query = query&(db.issue.status.belongs(['New','Assigned','Accepted','Started']))\n    elif status=='Closed':\n        query = query&(db.issue.status.belongs(\n            ['Fixed','Verified','Invalid','Duplicate','WontFix','Done']))\n    elif status!='All':\n        query = query&(db.issue.status==status)\n    \"\"\"comment\"\"\"\n    from gluon.utils import web2py_uuid\n    db.issue.project.default = project.id\n    db.issue.uuid.default = web2py_uuid()\n    db.issue.is_last.default = True\n    db.issue.owner.default = project.created_by.email\n    db.issue.description.default = DESCRIPTION\n    db.issue.labels.represent = lambda v,r: ', '.join(v or [])\n    if not auth.user or not (\n        auth.user.id == project.created_by or \\\n        auth.user.email in (project.members_email or [])):\n        db.issue.owner.writable = False\n        db.issue.status.writable = False\n    FIELDS=(db.issue.id,db.issue.uuid,db.issue.status,db.issue.summary,db.issue.created_on,db.issue.author,db.issue.labels,)\n    LINKS=[lambda row: A('Details',_href=URL('issue',args=row.uuid)),\n           lambda row: A('Sub-issues',_href=URL('issues',args=[project.id,row.id])),\n           lambda 
row2:A('Assignment',_href=URL('assign',args=row2.id)),\n lambda row3: A('Escalate', _href=URL('escalate',args=row3.id))]\n grid = SQLFORM.grid(query, fields = FIELDS,links=LINKS,\n details=False,editable=False,\n deletable=project.created_on==auth.user_id,\n create=auth.user_id,args=[project.id],\n oncreate=lambda form:do_mail([db.issue(form.vars.id)]))\n return dict(grid=grid, project=project)\n\ndef issue():\n last = db(db.issue.uuid==request.args(0))\\\n (db.issue.is_last==True).select().first()\n project = db.project(last.project) or redirect(URL('projects'))\n if auth.user:\n db.issue.status.default = last.status\n db.issue.summary.default = last.summary\n db.issue.project.default = last.project\n db.issue.uuid.default = last.uuid\n db.issue.is_last.default = True\n db.issue.owner.default = last.owner\n db.issue.labels.default = last.labels\n if not (auth.user.id == project.created_by or \\\n auth.user.email == last.owner or \\\n auth.user.email in (project.members_email or [])):\n db.issue.owner.default = project.created_by\n db.issue.owner.writable = False\n db.issue.status.writable = False\n form = SQLFORM(db.issue)\n if form.process().accepted:\n last.update_record(is_last=False)\n else:\n form = DIV('login to comment')\n items = db(db.issue.uuid==request.args(0)).select(\n orderby=db.issue.created_on)\n if isinstance(form,FORM) and form.accepted: do_mail(items)\n return dict(project=project,form=form,items=items,last=last)\n\n@auth.requires_membership('manager')\ndef assign():\n from datetime import datetime\n if (request.args(0)):\n query= (db.issue_assignment.issue==request.args(0))\n else:\n query=(db.issue_assignment)\n FIELDS=(db.issue_assignment.issue,db.issue_assignment.assigned_by,\\\n db.issue_assignment.assigned_to,db.issue_assignment.assigned_date)\n db.issue_assignment.assigned_by.default='%(first_name)s %(last_name)s' % auth.user\n db.issue_assignment.assigned_by.writable=False\n db.issue_assignment.assigned_date.default=datetime.now()\n db.issue_assignment.assigned_date.writable=False\n grid=SQLFORM.grid(query)\n return dict(grid=grid)\n\n@auth.requires_membership('manager')\ndef escalate():\n issueID=request.args(0)\n reference_project= db(db.issue.id==issueID).select().first()\n super_proj = db(db.project.id==reference_project.project).select(db.project.super_project).first()\n query = (db.issue.id==issueID)\n if super_proj.super_project == None:\n message = \"Already a top level project\"\n else:\n db(query).update(project=super_proj.super_project)\n message= \"The issue has been escalated\"\n session.flash = message\n redirect(URL('projects'))\n return dict()\n \ndef user():\n \"\"\"\n exposes:\n http://..../[app]/default/user/login\n http://..../[app]/default/user/logout\n http://..../[app]/default/user/register\n http://..../[app]/default/user/profile\n http://..../[app]/default/user/retrieve_password\n http://..../[app]/default/user/change_password\n use @auth.requires_login()\n @auth.requires_membership('group name')\n @auth.requires_permission('read','table name',record_id)\n to decorate functions that need access control\n \"\"\"\n return dict(form=auth())\n\n\ndef download():\n \"\"\"\n allows downloading of uploaded files\n http://..../[app]/default/download/[filename]\n \"\"\"\n return response.download(request,db)\n\n\ndef call():\n \"\"\"\n exposes services. 
for example:\n    http://..../[app]/default/call/jsonrpc\n    decorate with @services.jsonrpc the functions to expose\n    supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv\n    \"\"\"\n    return service()\n\n\n@auth.requires_signature()\ndef data():\n    \"\"\"\n    http://..../[app]/default/data/tables\n    http://..../[app]/default/data/create/[table]\n    http://..../[app]/default/data/read/[table]/[id]\n    http://..../[app]/default/data/update/[table]/[id]\n    http://..../[app]/default/data/delete/[table]/[id]\n    http://..../[app]/default/data/select/[table]\n    http://..../[app]/default/data/search/[table]\n    but URLs must be signed, i.e. linked with\n    A('table',_href=URL('data/tables',user_signature=True))\n    or with the signed load operator\n    LOAD('default','data.load',args='tables',ajax=True,user_signature=True)\n    \"\"\"\n    return dict(form=crud())\n","repo_name":"mdipierro/web2py-appliances","sub_path":"IssueTracker/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"3"} +{"seq_id":"14476308365","text":"# -*- coding:utf-8 -*-\n\"\"\" \n@Time : 2019/2/27 9:06\n@Author :\n@function: 日志输出\n\"\"\"\nimport logging\nimport logging.handlers\n\n# 1、定义收集器,并给收集器指定级别\nmy_logger = logging.getLogger('testlogger')\nmy_logger.setLevel('DEBUG')\n# 设置日志输出格式\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s - [%(filename)s:%(lineno)s]\")\n# 2、指定输出渠道,并给渠道设置级别\nch = logging.StreamHandler() # 输出到控制台\nch.setLevel('DEBUG')\nch.setFormatter(formatter)\n# fh = logging.FileHandler('test.log',encoding='utf-8') # 输出到文件\nfh = logging.handlers.RotatingFileHandler('test.log',maxBytes=20*1024*1024, backupCount=10,encoding='utf-8')\nfh.setLevel('DEBUG')\nfh.setFormatter(formatter) # 设置输出格式\n# 3、对接 日志收集器与输出渠道 进行对接\nmy_logger.addHandler(fh)\nmy_logger.addHandler(ch)\n\nmy_logger.debug('123')\nmy_logger.warning('456')\n# 去掉重复日志 每次收集完毕之后,移除Handler\nmy_logger.removeHandler(ch)\nmy_logger.removeHandler(fh)","repo_name":"liying1990523/python_api_20190326","sub_path":"python13_api_test/study_logger.py","file_name":"study_logger.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71637070480","text":"\ncross_points = {(0,0),(-1, 0), (0, -1), (0,1),(1, 0)}\nall_points = {(-1,-1),(-1, 0), (-1, 1), (0, -1),(0, 0), (0,1), (1, -1), (1, 0), (1,1)}\nbasic_dict = { #Dictionary actually shows which points are not in range, instead of storing those which can be reached. This has been found to also generate some bad consequences. \n    (-1, 1) : {(-1, 1),(1,1), (1, -1), (-1, -1)},\n    (1, 1) : {(1, 1),(-1,1), (1, -1), (-1, -1)},\n    (-1, -1) : {(1,1), (1, -1), (-1, 1),(-1, -1)},\n    (1, -1) : {(1,1), (-1, -1), (1, -1), (-1, 1)},\n    (0,1): {(0,1),(0,-1)},\n    (0,-1): {(0,-1),(0,1)},\n    (1,0): {(1,0),(-1, 0)},\n    (-1,0): {(-1, 0),(1,0)},\n    (0,0): {(0,0)}\n}\n\ndef generate_new_dict(dict, new_point):\n    ''' Generates a copy of a dictionary, in which we add the point we have currently reached, thus preventing it from being reached again. '''\n    new_dict = {}\n    for key, value in dict.items():\n        set = value.copy()\n        set.add(new_point)\n        new_dict[key] = set\n\n    return new_dict\n    \ndef check_new_lines(new_dict, new_point):\n    ''' For every point (except the one we just added) we look to see if any new connection is possible since the 'new_point' has been added. Here we have taken a geometric approach (as our field of points is structured as a plane) to find the point across the 'new_point', which we call the mirror_point. Maybe this way seems too elaborate for such a simple task, but it is chosen so that all the cases collapse into one idea. ''' \n    for point in all_points - {new_point} :\n        vector = (new_point[0] - point[0], new_point[1] - point[1]) # vec(AB) = B - A\n        mirror_point = (vector[0] * 2 + point[0], vector[1] * 2 + point[1]) # mirror_point = point + 2*vec\n        if mirror_point not in new_dict[(0,0)] and mirror_point[0] in {-1, 0, 1} and mirror_point[1] in {-1, 0, 1}:\n            new_set = new_dict[point].copy()\n            new_set.discard(mirror_point)\n            new_dict[point] = new_set\n\ndef paths(start, length, dict=basic_dict):\n    if length <= 1:\n        return 1\n    else:\n        new_dict = generate_new_dict(dict, start)\n        if start in cross_points:\n            check_new_lines(new_dict, start)\n\n        sum = 0\n        for new_line in all_points - dict[start]:\n            sum += paths(new_line, length-1, new_dict)\n\n        return sum\n\n#print(paths((0,0), 3, basic_dict))","repo_name":"golobluka/random_problems","sub_path":"screen_locking_patterns.py","file_name":"screen_locking_patterns.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13599287573","text":"#standard imports\nimport pandas as pd\nimport numpy as np\n\n\ndef prep_bees():\n    '''This function loads the bee_colony_loss.csv into a dataframe, cleans and sorts it, and returns a dataframe.'''\n    # read the csv into a pandas dataframe\n    df = pd.read_csv('bee_colony_loss.csv')\n    # drop the unnamed column\n    df = df.drop(columns='Unnamed: 0')\n    # sort by descending year and ascending state\n    df = df.sort_values(['year','state'], ascending=[False,True])\n    # drop nulls\n    df = df.dropna()\n    # lowercase all strings in state and replace spaces with underscores\n    df.state = df.state.str.lower().str.replace(' ','_')\n    # lowercase all strings in the season column\n    df.season = df.season.str.lower()\n    # remove observations that have 10 or fewer beekeepers\n    df = df[df.beekeepers > 10]\n    # drop duplicate rows\n    df = df.drop_duplicates()\n    # change total_loss column to float\n    df.total_loss = df.total_loss.astype(float)\n    # change average_loss column to float\n    df.average_loss = df.average_loss.astype(float)\n    # change ending_colonies column to int\n    df.ending_colonies = df.ending_colonies.astype(int)\n    # change colonies_lost column to int\n    df.colonies_lost = df.colonies_lost.astype(int)\n    # pull only annual season data\n    df = df[df.season == \"annual\"]\n    #pull non multistates and non continental usa data\n    df = df[(df.state != \"multistates\")& (df.state != \"non_continental_usa\")]\n    # create column net gain / loss for each state\n    df['colonies_net_gain'] = df.ending_colonies - df.starting_colonies\n    # create a column for beekeeper to colony ratio\n    df['beekeeper_colony_ratio'] = df.ending_colonies / df.beekeepers\n    # look at only beekeepers exclusive to state\n    df = df[df.beekeepers_exclusive_to_state == 100]\n    \n    # return the cleaned and sorted dataframe\n    return df\n\n\ndef state_ansi():\n    ''' This function will load state ansi from csv and return each state with its corresponding ansi'''\n    #read the csv\n    df = pd.read_csv(\"state_ansi.txt\",sep = \"|\")\n    #lower case column names \n    df.columns = df.columns.str.lower()\n    #lower case string values on the column and replace empty spaces with underscore\n    df.state_name = df.state_name.str.lower().str.replace(' ','_')\n    #rename column 
names and drop unnecessary columns\n df = df.rename(columns = {\"state\":\"ansi\", \"state_name\":\"state\"}).drop(columns = [\"stusab\", \"statens\"])\n \n #return back dataframe\n return df\n\ndef geo_data():\n ''' This function will load state ansi from csv and turn state with its corresponding ansi'''\n #read csv\n df = pd.read_csv(\"state_geocords.csv\", index_col = [0] )\n #rename column\n df= df.rename(columns = {\"name\":\"state\"})\n # lowercase values of column and replace spaces with underscore\n df.state = df.state.str.lower().str.replace(' ','_')\n #pull only useful column\n df = df[[\"state\",\"latitude\",\"longitude\"]]\n \n #return back dataframe\n return df\n\ndef bee_merged():\n \"\"\"This function will call in three different function and merge them all\"\"\"\n #call in prep bees function\n df = prep_bees()\n #call in function for state ansi data\n df1 = state_ansi()\n #call in function for geo data\n df2 = geo_data()\n #left join prep_bees dataset with state_ansi\n df = df.merge(df1, on = 'state', how = 'left')\n #left join prep_bees dataster with geo_state\n df = df.merge(df2, on=\"state\", how = \"left\")\n \n #return back dataframe\n return df\n","repo_name":"Honey-Bee-Roundup-Capstone/rajesh","sub_path":"wrangle.py","file_name":"wrangle.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16471255240","text":"# Types and Casting\n\ntype(3)\ntype(3.14)\ntype('Pi')\n\nsome_number = 5\ntype(some_number)\nanother_number = 9.81\ntype(another_number)\nsome_string = 'Hello world'\ntype(some_string)\n\n'''\n(Type)-casting\nIf you have a string with a number in it and you want to use it in a calculation,\nor if you have a number and want to use it in a sentence: that's possible.\nWhat you can do is \"cast\" the value to another type. 
In other words: casting means converting a value of one type to another type.\n\n'I like ' + str(3.14)\n'''\n\n\n# Strings\n\nexample_one = 'I am a string.'\nexample_two = \"Me too!\"\nexample_three = \"\"\"I too am a string.\n I am, in fact, a multiline string!\"\"\"\n\nexample_one = 'I\\'m a string.'\nexample_two = \"I'm a string.\"\nexample_three = 'He said: \"I\\'m a string\"'\n\n# There are more characters with special meanings:\n\n# \\n produces a new line;\n# \\t produces a tab.\n\n\n","repo_name":"RomuloPy/WincAcademy-repo","sub_path":"Back-end/types and casting/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3367119460","text":"# Routines for Weighted Approximate Fekete Points\n\nimport numpy as np\nfrom recurrence import jacobi_recurrence\nfrom opolynd import opolynd_eval\n\ndef legendre_wafp(lambdas, M=1e3, sampler=None):\n \"\"\"\n Generate M (= lambdas.shape[0]) weighted approximate Fekete points\n using randomized sampling.\n \"\"\"\n\n from scipy.linalg import qr\n from legendre_induced import induced_distribution_mixture_sampling\n\n if lambdas.ndim == 1:\n lambdas = np.reshape(lambdas, [lambdas.size, 1])\n\n N, d = lambdas.shape\n\n ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)\n\n if sampler is None:\n sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)\n\n # Choose at least 2*N samples\n M = max(M, 2*N)\n\n x = sampler(M)\n V = opolynd_eval(x, lambdas, ab)\n _, _, p = qr(V.T/np.sqrt(np.sum(V**2,axis=1)), pivoting=True, mode='economic')\n\n return x[p[:N], :]\n\ndef legendre_wafp_enrichment(x, lambdas, M_enrich, sampler=None):\n \"\"\"\n Adds M_enrich points to the existing point set x by (approximate)\n determinant maximization.\n \"\"\"\n\n from legendre_induced import induced_distribution_mixture_sampling\n\n if lambdas.ndim == 1:\n lambdas = np.reshape(lambdas, [lambdas.size, 1])\n\n if sampler is None:\n sampler = lambda MM: induced_distribution_mixture_sampling(lambdas, MM)\n\n M0 = x.shape[0]\n N, d = lambdas.shape\n ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)\n\n M = max(M0, 2*N)\n\n while x.shape[0] < M0 + M_enrich:\n V = opolynd_eval(x, lambdas, ab)\n W = (V.T/np.sqrt(np.sum(V**2,axis=1))).T * np.sqrt(float(N)/float(x.shape[0]))\n G = np.dot(W.T, W)\n iG = np.linalg.inv(G)\n\n xs = sampler(M)\n Vs = opolynd_eval(xs, lambdas, ab)\n Ws = (Vs.T/np.sqrt(np.sum(Vs**2,axis=1))).T\n dets = np.sum((Ws*np.dot(Ws, iG))**2, axis=1)\n ind = np.argmax(dets)\n\n x = np.vstack([x, xs[ind,:]])\n\n return x\n\nif __name__ == \"__main__\":\n\n from matplotlib import pyplot as plt\n from indexing import total_degree_indices, hyperbolic_cross_indices\n from recurrence import jacobi_recurrence\n from opolynd import opolynd_eval\n\n d, k = 9, 7\n\n #lambdas = total_degree_indices(d, k)\n lambdas = hyperbolic_cross_indices(d, k)\n N = lambdas.shape[0]\n\n x = legendre_wafp(lambdas, M=3e3)\n\n ab = jacobi_recurrence(lambdas.max() + 1, alpha=0., beta=0., probability=True)\n\n V = opolynd_eval(x, lambdas, ab)\n W = (V.T/np.sqrt(np.sum(V**2,axis=1))).T * np.sqrt(float(N)/float(x.shape[0]))\n\n M_enrich = 20\n x2 = legendre_wafp_enrichment(x, lambdas, M_enrich, sampler=None)\n V2 = opolynd_eval(x2, lambdas, ab)\n W2 = (V2.T/np.sqrt(np.sum(V2**2,axis=1))).T * np.sqrt(float(N)/float(x2.shape[0]))\n\n # W: unenriched\n # W2: 
enriched\n","repo_name":"SCIInstitute/FwdInvToolkit","sub_path":"PythonLibrary/uncertainty/wafp.py","file_name":"wafp.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"24614310262","text":"config = {\n\t\"ak3\": {\n\t\t# type: bool\n\t\t# Include date in zip filename\n\t\t\"include_date_in_zip_filename\": True,\n\t},\n\n\t\"build\": {\n\t\t# type: bool\n\t\t# Enable ccache\n\t\t\"enable_ccache\": True,\n\n\t\t# type: str\n\t\t# Build user name, will set KBUILD_BUILD_USER\n\t\t\"kbuild_build_user\": \"SebaUbuntu\",\n\n\t\t# type: str\n\t\t# Build host name, will set KBUILD_BUILD_HOST\n\t\t\"kbuild_build_host\": \"Seba-PC\",\n\n\t\t# type: str\n\t\t# Common name of the kernel\n\t\t\"kernel_name\": \"fratm\",\n\n\t\t# type: str\n\t\t# Common version of the kernels\n\t\t\"kernel_version\": \"1.0\",\n\t},\n}\n","repo_name":"sebaubuntu-python/build_kernel","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"3"} +{"seq_id":"20155541184","text":"#!C:\\Users\\79192\\AppData\\Local\\Programs\\Python\\Python311\\python.exe\r\n\r\nfrom connect import *\r\nimport math\r\nimport cgi\r\nimport cgitb\r\nimport gl\r\ncgitb.enable()\r\nform = cgi.FieldStorage()\r\n\r\n#!/usr/bin/evn python3\r\n\r\noption = form.getlist('radio_fio')\r\nm=form.getfirst('M')\r\nv = form.getfirst('v')\r\nc=form.getfirst('c')\r\nF=form.getfirst('F')\r\n\r\nrealF=float(m)*(9.8+float(v)*math.sqrt(float(c)/float(m)*10**3))\r\nrealF=round(realF,1)\r\nprint(f\"
Введенный ответ:
F={F}
\")\r\nprint(f\"
Правильный ответ:
F={realF}
\")\r\nrez=0\r\nif(realF==float(F)):\r\n print(\"

Ответ верный!

\")\r\n rez=1\r\nelse:\r\n print(\"

Ответ неверный!

\")\r\ncur=con.cursor()\r\ncur._query(f\"Select max(n) FROM `oscillatory_motion` WHERE id={gl.i}\")\r\nresult = cur.fetchone()[0]\r\nif(result==None):\r\n result=1\r\nelse:\r\n result+=1\r\ncur._query(f\"\"\"insert into `oscillatory_motion` values\r\n\t(\r\n\t\t{result},{gl.i},{m},{v},{c},{F},{rez}\t\r\n\t)\"\"\")\r\ncon.commit()\r\nprint(\"\"\"Вернуться на главную страницу\"\"\")","repo_name":"Vov4ik4573/diplom","sub_path":"oscillatory_motion2.py","file_name":"oscillatory_motion2.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"739998053","text":"# a=int(input(\"숫자1->\"))\r\n# b=int(input(\"숫자2->\"))\r\n# c=int(input(\"숫자3->\"))\r\n# d=int(input(\"숫자4->\"))\r\n# hap=a+b+c+d\r\n# print(hap)\r\n# 리스트\r\n# 리스트명=[값1,값2,....]\r\n# a=[1,2,3,4,5] a[0],a[1],a[3]...\r\n# aa=[0,0,0,0]\r\n# aa[0]=int(input(\"숫자1->\"))\r\n# aa[1]=int(input(\"숫자2->\"))\r\n# aa[2]=int(input(\"숫자3->\"))\r\n# aa[3]=int(input(\"숫자4->\"))\r\n# hap=aa[0]+aa[1]+aa[2]+aa[3]\r\n# print(hap)\r\n# print(aa)\r\n# print(len(aa)) #리스트aa의 길이\r\n# ----\r\n# animal=['cat','dog','bird']\r\n# print(animal)\r\n# print(animal[1])\r\n# for i in range(3): #0<=i<3 i=0,1,2\r\n# print(i,animal[i])\r\n# print(len(animal))\r\n# print('-'*30)\r\n# for i in range(len(animal)):#0<=i<3 i=0,1,2\r\n# print(i,animal[i])\r\n#\r\n# for i in animal: # i=animal[0],animal[1],animal[2]\r\n# print(i)\r\n# -----------\r\n# mix=[1,2,3,\"apple\",[10,20]]\r\n# print(mix)\r\n# print(len(mix))\r\n# print(mix[3])\r\n# print(mix[4])\r\n# for i in range(len(mix)): # i=0,1,2,3,4\r\n# print(mix[i])\r\n# for i in mix: #i=mix[0],mix[1],mix[2],..mix[4]\r\n# print(i)\r\n# print(mix[4])\r\n# print(mix[4][0])\r\n# print(mix[4][1])\r\n# a=[10,20]\r\n# a=['zero','one','two','three','four','five','six','seven']\r\n# print(a)\r\n# print(a[:])\r\n# print(a[:3],a[1:-1],a[1:-1:2]) #[시작인덱스:끝인덱스:step(생략시1)]\r\n# print(a[1::2])\r\n# b=[1,2,3]\r\n# print(b)\r\n# #\r\n# print(a+b)\r\n# print(b*3)\r\n# b[0]=100\r\n# print(b)\r\n# a[2]=b\r\n# print(a)\r\n# # 삭제\r\n# del a[2]\r\n# print(a)\r\n# del a[:]\r\n# print(a)\r\n# 추가\r\n# a=[89,12,5,8,3,1,2]\r\n# print(type(a))\r\n# print(type(a[2]))\r\n# a.append(100)\r\n# a.append(200)\r\n# print(a)\r\n# a.pop() #마지막값 삭제\r\n# print(a)\r\n# a.remove(8) #처음만나는 숫자8을 삭제\r\n# print(a)\r\n# a.sort() #오름차순정렬\r\n# print(a)\r\n# a.reverse() #내림차순 정렬\r\n# print(a)\r\n# aa리스트의 크기는 100이고 0,2,4,6,8 처럼 짝수로 초기화\r\n# aa=[]\r\n# for i in range(100): #i=0,1,2,....99\r\n# aa.append(i*2)\r\n# print(aa)\r\n# bb리스트의 크기는 100이고 198,196,194,...0으로 초기화\r\n# bb[0]=aa[99],bb[1]=aa[98],bb[2]=aa[97],.....\r\n# bb=[]\r\n# for i in range(100):\r\n# bb.append(aa[99-i])\r\n# print(bb)\r\n\r\n# aa.reverse()\r\n# bb=aa\r\n# print(bb)\r\n#cc에 3의 배수 200개를 입력하고 10번째,20번째,....190,200번째 값을 출력하세요\r\n# cc=[]\r\n# for i in range(200):\r\n# cc.append((i+1)*3)\r\n# # if (i%9==0) & (i!=0):\r\n# # print(cc[i])\r\n# for i in range(9,200,10):\r\n# print(cc[i])\r\n\r\n# my=[30,10,20]\r\n# print(\"현재List %s\"%my)\r\n# my.append(40)\r\n# print(\"append(40)후 %s\"%my)\r\n# my.pop()\r\n# print(\"pop()후 %s\"%my)\r\n# my.sort()\r\n# print(\"sort()후 %s\"%my)\r\n# my.reverse()\r\n# print(\"reverse()후 %s\"%my)\r\n# print(\"20의 위치 %s\"%my.index(20))\r\n# my.insert(2,222)\r\n# print(\"insert(2,222)후 %s\"%my)\r\n# my.remove(222)\r\n# print(\"remove(222)후 %s\"%my)\r\n# my.extend([77,88,99])\r\n# print(\"extend([77,88,99])후 
%s\"%my)\r\nmy=[30,10,20]\r\na=[89,12,5,8,3,1,2]\r\nanimal=['cat','dog','bird']\r\nt=[]\r\nt.append(my)\r\nt.append(a)\r\nt.append(animal)\r\n # t=[]\r\n # t.append(1)\r\n # t.append(2)\r\n # t.append(3) t=[1,2,3]\r\n# print(t)\r\n# print(len(t))\r\nfor i in range(len(t)): #i=0,1,2\r\n print(\"t[%s] 의 값 %s\"%(i,t[i]))\r\n for j in range(len(t[i])):\r\n print( \"%s의 %s번째값 %s\"%(t[i],j,t[i][j]) )\r\n\r\n\r\n\r\n\r\n","repo_name":"LimeOrangeTree/PYTHON","sub_path":"list1.py","file_name":"list1.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12943780396","text":"'''\nPerspective Transform Estimation Implementation\n'''\nimport numpy as np\n\n\ndef getXY(TransformMat, x, y):\n '''\n Obtain X,Y using PTE of x,y by TransformMat\n :param TransformMat: Transform Matrix\n :param x: source x\n :param y: source y\n :return: \n '''\n g = TransformMat[2][0]\n h = TransformMat[2][1]\n W = g * x + h * y + 1\n inputs = [[x], [y], [1]]\n results = np.matmul(TransformMat, inputs)\n return [results[0][0] / W, results[1][0] / W]\n\n\ndef getTransformMat(A1, A2, A3, A4, B1, B2, B3, B4):\n '''\n Obtain PTE matrix using 4 coordinate point pairs\n :param A1: \n :param A2: \n :param A3: \n :param A4: \n :param B1: \n :param B2: \n :param B3: \n :param B4: \n :return: \n '''\n (x1, y1) = A1[:]\n (x2, y2) = A2[:]\n (x3, y3) = A3[:]\n (x4, y4) = A4[:]\n (X1, Y1) = B1[:]\n (X2, Y2) = B2[:]\n (X3, Y3) = B3[:]\n (X4, Y4) = B4[:]\n\n A = [[x1, y1, 1, 0, 0, 0, -X1 * x1, -X1 * y1],\n [0, 0, 0, x1, y1, 1, -Y1 * x1, -Y1 * y1],\n [x2, y2, 1, 0, 0, 0, -X2 * x2, -X2 * y2],\n [0, 0, 0, x2, y2, 1, -Y2 * x2, -Y2 * y2],\n [x3, y3, 1, 0, 0, 0, -X3 * x3, -X3 * y3],\n [0, 0, 0, x3, y3, 1, -Y3 * x3, -Y3 * y3],\n [x4, y4, 1, 0, 0, 0, -X4 * x4, -X4 * y4],\n [0, 0, 0, x4, y4, 1, -Y4 * x4, -Y4 * y4]]\n B = [[X1], [Y1], [X2], [Y2], [X3], [Y3], [X4], [Y4]]\n\n # Al=B\n A = np.array(A)\n B = np.array(B)\n l = np.matmul(np.linalg.inv(np.matmul(A.transpose(), A)), np.matmul(A.transpose(), B))\n [a, b, c, d, e, f, g, h] = l[:]\n transformMat = [[a, b, c], [d, e, f], [g, h, 1]]\n return transformMat\n\n\nif __name__ == \"__main__\":\n # Test Snippet\n TransformMat = getTransformMat([0, 0], [1, 0], [1, 1], [0, 1], [10, 10], [15, 10], [15, 15], [10, 15])\n (X, Y) = getXY(TransformMat=TransformMat, x=0.5, y=0)\n print(X, Y)\n","repo_name":"eduze/AugurSense","sub_path":"sense/QuadToQuadMap.py","file_name":"QuadToQuadMap.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"3008720002","text":"#! 
/usr/bin/env python3\n\n# By ArkC developers\n# Released under GNU General Public License 2\n\nimport argparse\n\nfrom common import *\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"ArkC Key Utilities\")\n # Load arguments\n #parser.add_argument(\"-v\", dest=\"v\", action=\"store_true\", help=\"show detailed logs\")\n parser.add_argument('-gs', '--get-SHA1', dest=\"gs\", help=\"Get sha1 of a public or private key file\")\n options = parser.parse_args()\n if options.gs:\n try:\n key_data = open(options.gs, \"r\").read()\n key = certloader(key_data).importKey()\n key_sha1 = certloader(key_data).getSHA1()\n print(key_sha1)\n except Exception as err:\n print (\"Fatal error while loading local certificate.\")\n print (err)\n quit()\n ","repo_name":"projectarkc/arkc-keyutils","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17299172754","text":"# coding= utf-8\n\nfrom bs4 import BeautifulSoup\nfrom Event import *\nfrom News import *\nfrom IntheMedia import *\nimport mysql.connector\nimport time\nfrom Scraper import Scraper\n\n'''\ntoDict()\n\nMethod to convert a model object for DNDi posts into a dictionary.\n\nParameters: Article - Must be either ScientificArticle or PressRelease\n\nReturns: result dictionary\n'''\ndef toDict(article):\n result = {}\n result[ 'id' ] = article._id\n result[ 'title' ] = article._title\n result[ 'tag' ] = article._tags\n result[ 'category'] = article._cats\n result[ 'body' ] = article._body\n result[ 'url' ] = article._url\n result[ 'date' ] = article._datestr\n return result \n\n'''\nDNDiDB\n\nClass models MySQL database connection\n'''\nclass DNDiDB:\n # Declarations\n ###\n # Tags provided by DNDi\n TAGS = [ 'HAT – Sleeping Sickness', 'Chagas disease', 'Filarial diseases', 'Hepatitis C', 'Leishmaniasis', 'Malaria', 'Mycetoma',\n 'Paediatric HIV', 'Access', 'Advocacy', 'Funding', 'Partnership', 'Regulatory', 'Strengthening capacities', 'Treatment' ]\n # Valid month codes\n MONTH = { 'January': '01', 'February': '02', 'March': '03', 'April': '04','May': '05','June': '06',\n 'July': '07','August': '08','September': '09','October': '10','November': '11','December': '12'}\n\n '''\n Parameters: List of database tables\n '''\n def __init__(self, tables ):\n self.tables = tables\n # Initialize a connection to the database\n self.connection = mysql.connector.connect(user='root', database='dndi', password='cs300')\n # Create a new database curson\n self.cursor = self.connection.cursor()\n\n '''\n deleteALL()\n\n Drop all data from the database\n '''\n def deleteALL(self):\n for table in self.tables:\n self.cursor.execute( ( \"delete from \" + table ) )\n self.cursor.execute( ( \"delete from tagPost\" ) )\n self.connection.commit()\n\n '''\n importTable\n\n Import all relevant data into a given table.\n\n Parameter: Table\n '''\n def importTable( self, table ):\n # Initialize SQL queries\n addStatment = ( \"INSERT INTO \" + table + \" ( id, title, date, body, url, category ) VALUES( %s, %s, %s, %s, %s, %s )\" )\n tagStatement = ( \"INSERT INTO tagPost ( tag_id, post_id ) VALUES( %s, %s )\" )\n # Get the data for the given table\n data = self.getData( table )\n # If data is retrieved successfully\n if data is not False:\n # For each row of the table\n for datum in data:\n # Get the date\n date = self._getDate( datum['date'] )\n dataStatement = ( 
datum['id'],datum['title'],date,datum['body'],datum['url'],datum['category'])\n # Apply tags\n for tag in datum['tag']:\n if tag[:3] == 'HAT':\n tagID = 0\n else:\n tagID = self.TAGS.index(tag)\n try:\n self.cursor.execute( tagStatement, ( tagID, datum['id'] ) )\n except Exception as e: \n if e.errno == 1406:\n dataStatement = ( datum['id'],datum['title'][0:200], None, datum['body'],datum['url'],datum['category'])\n self.cursor.execute( addStatment, dataStatement )\n # Execute the query\n try:\n self.cursor.execute( addStatment, dataStatement )\n except Exception as e:\n if e.errno == 1292:\n dataStatement = ( datum['id'],datum['title'], None, datum['body'],datum['url'],datum['category'])\n self.cursor.execute( addStatment, dataStatement )\n else:\n print(datum)\n logging.exception( datum )\n self.connection.commit()\n \n print(table + ' is commited!')\n\n '''\n getData()\n\n Get all relevant data for the given table.\n\n Parameter: Table\n '''\n def getData( self, table ):\n if table == 'Event':\n return getAllEvent()\n elif table == 'News':\n return getAllNews()\n elif table == 'InTheMedia':\n return getAllInTheMedia()\n elif table == 'ScientificArticle':\n scraper = Scraper()\n scraper.load_pages(\"http://www.dndi.org/category/media-centre/scientific-articles/page/\")\n scraper.find_articles()\n result = []\n for article in scraper._articles:\n result.append( toDict(article) )\n return result\n elif table == 'PressRelease':\n scraper = Scraper()\n scraper.load_pages(\"http://www.dndi.org/category/media-centre/press-releases/page/\")\n scraper.find_releases()\n result = []\n for article in scraper._releases:\n result.append( toDict(article) )\n return result\n else:\n return False\n\n '''\n importAll\n\n Imports relevant data into each table specified.\n\n No parameters\n '''\n def importAll(self):\n for table in self.tables:\n self.importTable( table )\n\n\n '''\n __getDate\n\n Parse a date from a given string.\n\n Parameter: String representing a date\n '''\n def _getDate(self, string):\n string = string.strip()\n string = string.replace('[', '')\n string = string.split(']')[0]\n string = string.replace(',', '').strip()\n array = string.split(' ')\n if len( array ) == 1:\n if array[0].isdigit():\n return array[0] + '-00-00'\n else:\n return None\n elif len( array ) == 2:\n if self.MONTH.has_key( array[ 0 ] ):\n month = self.MONTH[ array[0] ]\n else:\n month = '00'\n return array[-1] + '-' + month + '-00'\n else:\n if array[0].split('-')[0].isdigit():\n date = array[0]\n if self.MONTH.has_key( array[ 1 ] ):\n month = self.MONTH[ array[ 1 ] ]\n else:\n month = '00'\n else:\n date = array[1]\n if self.MONTH.has_key( array[ 0 ] ):\n month = self.MONTH[ array[ 0 ] ]\n else:\n month = '00'\n if len(date) > 1:\n if not date[1].isdigit():\n date = '0' + date[0]\n return array[-1] + '-' + month + '-' + date[:2]\n \nstart_time = time.time()\n\ndndi = DNDiDB( [ 'Event', 'News', 'InTheMedia', 'ScientificArticle', 'PressRelease' ] )\ndndi.deleteALL()\ndndi.importAll()\n\nprint('Total Elapsed: ' + str( time.time() - start_time ) )","repo_name":"1upD/DNDiGraph","sub_path":"app/Helpers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24622017188","text":"import numpy as np\nimport frames\n\ndef find_anomaly(in_path,out_path):\n def helper(frames):\n ts=z_score(area(frames))\n outliners=np.array(frames)[( np.abs(ts)>2)]\n return outliners\n 
frames.transform_template(in_path,out_path,helper,False)\n\ndef z_score(frames):\n frames=np.array(frames)\n frames-=np.mean(frames)\n frames/=np.std(frames)\t\n return frames\n\ndef area(frames):\n frames=np.array(frames)\n frames[frames!=0]=1.0\n return np.array([ np.mean(frame_i) for frame_i in frames])\n\nin_path=\"../../clean/clf/result\"\nout_path=\"test\"\nfind_anomaly(in_path,out_path)","repo_name":"tjacek/pcloud_utils","sub_path":"detect_person/anomaly.py","file_name":"anomaly.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71977488721","text":"# -*- coding: utf-8 -*-\nimport glob\nimport os\nimport os.path\n\nimport Kittens.utils\nfrom PyQt4.Qt import QWidget, QDialog, QWizard, QWizardPage, QButtonGroup, QVBoxLayout, QRadioButton, QObject, SIGNAL, QHBoxLayout, QLineEdit, QPushButton, QFileDialog, QMessageBox\n\nimport Purr\nfrom Purr import pixmaps\n\n\nclass Error(RuntimeError):\n def __init__(self, message):\n RuntimeError.__init__(self)\n self.error_message = message\n\n\nwizard_dialog = None\n\n\ndef startWizard(args, mainwin, modal=True):\n \"\"\"\n Parses list of directories ('args') and attaches to purrlogs appropriately.\n 'mainwin' is a Purr.MainWindow object.\n\n Return value:\n * if modal=True: True if arguments parse OK and Purr.MainWindow should be shown, False on error.\n * if modal=False: True if attached to purrlog and Purr.MainWindow should be shown, False otherwise.\n\n If modal=False, it will keep displaying the startup dialog as a modeless dialog, and attach the\n mainwin to the specified purrlog when the dialog is closed.\n\n Use cases:\n $ purr []\n 1. If dirname does not exist:\n * find other purrlogs in parent dir\n * pop up wizard, with the name as the default \"create\" option, and create selected\n 2. If dirname exists:\n 2a. Not a directory: report error\n 2b. If dir is a purrlog, attach and add dirs\n 2c. If dir contains 1 valid purrlog, attach, and add more dirs.\n 2d. If dir contains 2+ purrlogs, offer choice (or create new), add more dirs\n 2e. If dir contains no purrlogs, offer to create one (or to attach to one), add \"more dirs\" to watchlist if creating new, ignore if attaching\n $ purr\n * same as \"purr .\"\n $ from meqbrowser, when cd into a directory:\n * if purr dialog is visible, do same as \"purr .\"\n $ from meqbrowser, when purr button is pressed and purr is not yet visible\n * do \"purr .\"\n $ from Cattery.Calico, when an MS is selected:\n * do purr MS.purrlog .\n \"\"\"\n\n args = args or [os.getcwd()]\n dirname = os.path.abspath(args[0])\n moredirs = args[1:]\n\n # case 1: dirname does not exist, or refers to a non-directory\n if not os.path.exists(dirname) or not os.path.isdir(dirname):\n message = \"\"\"To begin with, PURR must load an existing purrlog, or start a new purrlog. 
%s does not look to be an existing purrlog.\n What would you like to do?\"\"\" % Kittens.utils.collapseuser(dirname)\n create = dirname\n dirname = os.getcwd()\n parent = os.path.dirname(os.path.normpath(create)) or os.getcwd()\n # if parent is valid dir, find purrlogs in parent (to offer as an option)\n if os.path.isdir(parent):\n purrlogs = list(filter(Purr.Purrer.is_purrlog, glob.glob(os.path.join(parent, \"*\"))))\n # else use \".\" as dirname, and do not offer any purrlogs\n else:\n purrlogs = []\n # case 2: dirname exists:\n else:\n create = None\n # case 2b: is a valid purrlog\n if Purr.Purrer.is_purrlog(dirname):\n mainwin.attachPurrlog(dirname, moredirs)\n mainwin.show()\n return True\n # case 2c-2e. Look for purrlogs in dirname\n purrlogs = list(filter(Purr.Purrer.is_purrlog, glob.glob(os.path.join(dirname, \"*\"))))\n # case 2c: exactly one purrlog. Attach without asking.\n if len(purrlogs) == 1:\n mainwin.show()\n mainwin.attachPurrlog(purrlogs[0], moredirs)\n return True\n # else setup messages\n if purrlogs:\n message = \"\"\"To begin with, PURR must load an existing purrlog, or start a new purrlog. The directory %s contains\n several purrlogs. What would you like to do?\"\"\" % Kittens.utils.collapseuser(dirname)\n else:\n message = \"\"\"To begin with, PURR must load an existing purrlog, or create a new purrlog. The directory %s contains\n no purrlogs. What would you like to do?\"\"\" % Kittens.utils.collapseuser(dirname)\n\n # case 1, 2d or 2e: make wizard dialog\n\n # kill old wizard, if showing\n global wizard_dialog\n if wizard_dialog:\n wizard_dialog.hide()\n dum = QWidget()\n wizard_dialog.setParent(dum)\n dum = wizard_dialog = None\n\n # create new wizard\n wizard_dialog = PurrStartupWizard(mainwin, dirname, purrlogs, moredirs=moredirs, create=create, message=message)\n\n if modal:\n if wizard_dialog.exec_() == QDialog.Rejected:\n return False\n return True;\n else:\n wizard_dialog.setModal(False)\n wizard_dialog.show()\n return False\n\n\nclass PurrStartupWizard(QWizard):\n class StartPage(QWizardPage):\n def __init__(self, dirname, purrlogs, parent=None, create=None, message=None):\n QWizardPage.__init__(self, parent)\n self.dirname = dirname\n self.purrlogs = purrlogs or []\n bg = QButtonGroup(self)\n lo = QVBoxLayout()\n self.setLayout(lo)\n # set page titles\n self.setTitle(\"Starting PURR\")\n message and self.setSubTitle(message)\n if not purrlogs:\n self.rbs_log = []\n else:\n # add options for existing purrlogs\n self.rbs_log = [QRadioButton(\"Load %s\" % Kittens.utils.collapseuser(log)) for log in purrlogs]\n for rb in self.rbs_log:\n lo.addWidget(rb)\n bg.addButton(rb)\n QObject.connect(rb, SIGNAL(\"toggled(bool)\"), self.checkCompleteness)\n self.rbs_log[0].setChecked(True)\n # add option to load another purrlog\n lo1 = QHBoxLayout()\n self.rb_other = QRadioButton(\"Load purrlog from:\")\n lo1.addWidget(self.rb_other)\n bg.addButton(self.rb_other)\n self.wother = QLineEdit()\n self.wother.setReadOnly(True)\n lo1.addWidget(self.wother, 1)\n pb = QPushButton(pixmaps.folder_open.icon(), \"\")\n QObject.connect(pb, SIGNAL(\"clicked()\"), self._select_other_dialog)\n QObject.connect(self.rb_other, SIGNAL(\"toggled(bool)\"), pb.setEnabled)\n QObject.connect(self.rb_other, SIGNAL(\"toggled(bool)\"), self.wother.setEnabled)\n QObject.connect(self.rb_other, SIGNAL(\"toggled(bool)\"), self.checkCompleteness)\n pb.setEnabled(False)\n self.wother.setEnabled(False)\n lo1.addWidget(pb)\n lo.addLayout(lo1)\n self.load_path = None\n\n # add option to create new purrlog\n 
lo1 = QHBoxLayout()\n self.rb_create = QRadioButton(\"Create new purrlog:\")\n lo1.addWidget(self.rb_create)\n bg.addButton(self.rb_create)\n self.wcreate = QLineEdit()\n lo1.addWidget(self.wcreate, 1)\n pb = QPushButton(pixmaps.folder_open.icon(), \"\")\n QObject.connect(pb, SIGNAL(\"clicked()\"), self._select_create_dialog)\n QObject.connect(self.rb_create, SIGNAL(\"toggled(bool)\"), pb.setEnabled)\n QObject.connect(self.rb_create, SIGNAL(\"toggled(bool)\"), self.wcreate.setEnabled)\n QObject.connect(self.rb_create, SIGNAL(\"toggled(bool)\"), self.checkCompleteness)\n QObject.connect(self.wcreate, SIGNAL(\"editingFinished()\"), self._validate_create_filename)\n pb.setEnabled(False)\n self.wcreate.setEnabled(False)\n # this holds the last validated name\n self._validated_create_path = None\n self._validated_result = False\n # generate default name for a new purrlog\n self.create_path = os.path.join(dirname, \"purrlog\")\n num = 0\n while os.path.exists(self.create_path):\n self.create_path = os.path.join(dirname, \"purrlog.%d\" % num)\n num += 1\n # This will be not None as long as a valid name is entered\n self.create_path = Kittens.utils.collapseuser(os.path.normpath(self.create_path))\n if create:\n self.wcreate.setText(create or Kittens.utils.collapseuser(create))\n # this will emit checkCompleteness(), causing a _validate_create_filename() call, causing the content of the wcreate widget\n # to be validated and copied to create_path if valid, or reset from create_path if invalid\n self.rb_create.setChecked(True)\n else:\n self.wcreate.setText(self.create_path)\n\n lo1.addWidget(pb)\n lo.addLayout(lo1)\n\n # make create option default, if no purrlogs\n if not purrlogs:\n self.rb_create.setChecked(True)\n\n def _select_other_dialog(self):\n path = str(QFileDialog.getExistingDirectory(self, \"Select purrlog\", self.dirname))\n if not path:\n return\n if not Purr.Purrer.is_purrlog(path):\n QMessageBox.warning(self, \"Invalid purrlog\",\n \"The path you have selected, %s, does not refer to a valid purrlog.\" % Kittens.utils.collapseuser(\n path))\n return\n self.load_path = path\n self.wother.setText(Kittens.utils.collapseuser(path))\n self.checkCompleteness()\n\n def _validate_create_filename(self, path=None, check=True):\n if path is None:\n path = str(self.wcreate.text())\n # if we have already validated this path, then return the last validation result.\n # This is mostly to keep from bombarding the user with repeated error dialogs.\n if self._validated_create_path == path:\n return self._validated_result\n self._validated_create_path = path\n self._validated_result = False; # set to True if all checks pass\n # now process the path. Normalize it, and expand \"~\"\n path = os.path.expanduser(os.path.normpath(path))\n # if not absolute, join to current directory\n if not os.path.isabs(path):\n path = os.path.join(self.dirname, path)\n # collapse to \"~\" (for error messages)\n path0 = Kittens.utils.collapseuser(path)\n if os.path.exists(path):\n QMessageBox.warning(self, \"Can't create purrlog\",\n \"\"\"Unable to create purrlog %s: file or directory already exists. Please select another name\"\"\" % path0)\n self.create_path and self.wcreate.setText(Kittens.utils.collapseuser(self.create_path))\n return False\n if not os.access(os.path.dirname(os.path.normpath(path)) or '.', os.W_OK):\n QMessageBox.warning(self, \"Can't create purrlog\",\n \"\"\"Unable to create purrlog %s: can't write to parent directory. 
Please select another path.\"\"\" % path0)\n self.create_path and self.wcreate.setText(Kittens.utils.collapseuser(self.create_path))\n return False\n self.create_path = path\n self.wcreate.setText(path0)\n self._validated_result = True; # set to True if all checks pass\n if check:\n self.checkCompleteness()\n return True\n\n def _select_create_dialog(self):\n path = str(QFileDialog.getSaveFileName(self, \"Create new purrlog\", self.create_path))\n if path:\n self._validate_create_filename(path)\n\n def checkCompleteness(self, toggled=None):\n if toggled and hasattr(self, 'rb_other') and self.rb_other.isChecked() and not self.load_path:\n self._select_other_dialog()\n else:\n self.emit(SIGNAL(\"completeChanged()\"))\n\n def isComplete(self):\n if hasattr(self, 'rb_create') and self.rb_create.isChecked():\n return self._validate_create_filename(check=False) and bool(self.create_path)\n if hasattr(self, 'rb_other') and self.rb_other.isChecked():\n return bool(self.load_path)\n return True\n\n def selectedPath(self):\n for (rb, log) in zip(self.rbs_log, self.purrlogs):\n if rb.isChecked():\n return log\n if self.rb_other.isChecked():\n return self.load_path\n if self.rb_create.isChecked():\n return self.create_path\n return None\n\n def __init__(self, mainwin, dirname, purrlogs, moredirs=[], create=None, message=None):\n QWizard.__init__(self, mainwin)\n self.setWindowTitle(\"Starting PURR\")\n self.setPixmap(QWizard.LogoPixmap, pixmaps.purr_logo.pm())\n self.setOption(QWizard.NoBackButtonOnStartPage)\n self.setButtonText(QWizard.FinishButton, \"Proceed\")\n # create start page\n self._startpage = self.StartPage(dirname, purrlogs, create=create, message=message)\n self.addPage(self._startpage)\n # internal state\n self._dirname = dirname\n self._mainwin = mainwin\n self._moredirs = moredirs\n\n def done(self, code):\n if code == QDialog.Accepted:\n # check path, set code to rejected if none set\n path = self._startpage.selectedPath()\n if not path:\n print(\"No path selected in StartupWizard. 
This is probably a bug, please report it!\")\n code = QDialog.Rejected\n else:\n # show the main window\n self._mainwin.show()\n # if attaching to existing purrlog, cancel the moredirs argument\n # if creating new purrlog, add parent directory to watchlist\n moredirs = None if os.path.exists(path) else [self._dirname] + list(self._moredirs)\n self._mainwin.attachPurrlog(path, moredirs)\n return QDialog.done(self, code)\n\n def selectedPath(self):\n return self._startpage.selectedPath()\n\n# class PurrlogSelectWizard (QDialog):\n# def __init__(self,parent,dirname,purrlogs):\n# QDialog.__init__(self,parent)\n# self.setWindowTitle(\"PURR Startup Wizard\")\n# self._currier = Kittens.utils.PersistentCurrier()\n# self._lo = QVBoxLayout(self)\n# self._lo.setSpacing(0)\n# for log in purrlogs:\n# self.button(pixmaps.purr_logo.icon(),\"Load %s\"%Kittens.utils.collapseuser(log),self._currier.curry(self._load_log,log))\n# self.button(pixmaps.purr_logo.icon(),\"Load a different purrlog...\",self._load_other_log)\n# self.button(pixmaps.purr_logo.icon(),\"Create new purrlog in %s...\"%Kittens.utils.collapseuser(dirname),self._create_log)\n# self._lo.addSpacing(10)\n# self.button(pixmaps.red_round_cross.icon(),\"Cancel\",self.reject)\n#\n# def button (self,icon,text,callback):\n# pb = QToolButton(self)\n# pb.setIcon(icon)\n# pb.setText(text)\n# pb.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)\n# pb.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.MinimumExpanding)\n# # pb.setFlat(True)\n# self._lo.addWidget(pb)\n# QObject.connect(pb,SIGNAL(\"clicked()\"),callback)\n# return pb\n#\n# def _load_log (self,logname):\n# pass\n#\n# def _create_log (self):\n# pass\n#\n# def _load_other_log (self):\n# dialog = QFileDialog(parent,\"Open or create purrlog\",dirname,\"*purrlog\")\n# try:\n# dialog.setOption(QFileDialog.ShowDirsOnly)\n# dialog.setFileMode(QFileDialog.Directory)\n# except AttributeError: # Qt 4.4 has no setOption\n# dialog.setFileMode(QFileDialog.DirectoryOnly)\n# if purrlogs:\n# dialog.setSidebarUrls(map(QUrl,purrlogs))\n# if not dialog.exec_():\n# return False\n# logname = str(dialog.selectedFiles()[0])\n# mainwin.show()\n# mainwin.attachPurrlog(logname,moredirs)\n# return True\n","repo_name":"ratt-ru/purr","sub_path":"Purr/Startup.py","file_name":"Startup.py","file_ext":"py","file_size_in_byte":15875,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8996673361","text":"import rasterio\nimport numpy as np\nimport cv2\nimport argparse\n\n\ndef apply_distance_buffer(input_path, output_path, background_class_label,\n target_class_label, buffer_size):\n\n with rasterio.open(input_path) as f:\n mask = f.read().squeeze()\n mask_profile = f.profile\n\n nodata_mask = (mask != target_class_label).astype(np.uint8)\n\n\n transform = cv2.distanceTransform(\n nodata_mask, distanceType=cv2.DIST_L2, maskSize=3\n )\n\n\n # buffer size is in meters -- this is in units of the CRS\n background_mask = (transform > 0) & (transform < buffer_size)\n\n\n mask[background_mask] = target_class_label\n\n\n with rasterio.open(output_path, \"w\", **mask_profile) as f:\n f.write(mask, 1)\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description='Basic statistics on tfrecord files')\n\n parser.add_argument('--input_path', dest='input_path',\n required=True)\n parser.add_argument('--output_path', dest='output_path',\n required=True)\n parser.add_argument('--background_class_label', \n dest='background_class_label',\n default=1)\n 
parser.add_argument('--target_class_label',\n dest='target_class_label',\n default=3)\n parser.add_argument('--buffer_size',\n dest='buffer_size',\n default=3)\n\n\n parsed_args = parser.parse_args()\n\n return parsed_args\n\ndef main():\n parsed_args = parse_args()\n\n apply_distance_buffer(parsed_args.input_path, parsed_args.output_path,\n parsed_args.background_class_label, \n parsed_args.target_class_label,\n parsed_args.buffer_size)\n\nif __name__ == '__main__':\n main()\n","repo_name":"beerys/fast_segmentation","sub_path":"apply_distance_buffer_to_mask.py","file_name":"apply_distance_buffer_to_mask.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71921005522","text":"'''\r\nThis is a primitive tik-tak-toe game. I followed the tutorial\r\nfrom AtiByte ( https://www.youtube.com/channel/UC4L3JyeL7TXQM1f3yD6iVQQ )\r\n\r\nYou can restart the game by pressing the space button,\r\nand close it by pressing the esc.\r\n\r\ncode by arturfriedrich ( https://github.com/arturfriedrich )\r\ninspiration by atibyte ( https://github.com/totex )\r\n'''\r\n\r\nimport pygame\r\nfrom grid import Grid\r\n\r\nimport os\r\nos.environ[\"SDL_VIDEO_WINDOW_POS\"] = \"400, 100\"\r\n\r\nsurface = pygame.display.set_mode((600, 600))\r\npygame.display.set_caption(\"Tic-tak-toe\")\r\n\r\ngrid = Grid()\r\n\r\nplayer = \"X\"\r\n\r\nrunning = True\r\n\r\nwhile running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN and not grid.game_over:\r\n if pygame.mouse.get_pressed()[0]:\r\n pos = pygame.mouse.get_pos()\r\n grid.get_mouse(pos[0] // 200, pos[1] // 200, player)\r\n if grid.switch_player:\r\n if player == \"X\":\r\n player = \"O\"\r\n else:\r\n player = \"X\"\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE and grid.game_over:\r\n grid.clear_grid()\r\n grid.game_over = False\r\n elif event.key == pygame.K_ESCAPE:\r\n running = False\r\n\r\n surface.fill((0, 0, 0))\r\n\r\n grid.draw(surface)\r\n\r\n pygame.display.flip()","repo_name":"arturfriedrich/tik-tak-toe-singleplayer","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18390556866","text":"# Definition for an interval.\n# class Interval:\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution: # 56ms\n \n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n if len(intervals) == 0: return []\n elif len(intervals) == 1: return [intervals[0]]\n \n lst = sorted(list(intervals), key= lambda x: x.start)\n merged = []\n for i in range(len(lst)):\n if merged != [] and merged[-1].end >= lst[i].start:\n merged[-1].end = max(lst[i].end,merged[-1].end)\n else:\n merged.append(lst[i])\n return merged\n \n def insert(self, intervals: List[Interval], newInterval: Interval) -> List[Interval]:\n if not intervals:\n return [newInterval]\n if not newInterval:\n return intervals\n \n lst = sorted(list(intervals), key= lambda x: x.start)\n if newInterval.end < lst[0].start:\n lst = [newInterval] + lst\n return lst\n elif newInterval.start > lst[-1].end:\n lst += [newInterval]\n return lst\n elif newInterval.start <= lst[0].start and newInterval.end >= lst[-1].end:\n lst = [newInterval]\n return lst\n \n for i in range(len(lst)):\n if not 
(newInterval.end < lst[i].start or lst[i].end < newInterval.start):\n break\n if i == len(lst):\n # no overlap occurs\n lst += [newInterval]\n return lst\n else:\n # some overlap occurs!\n lst += [newInterval]\n return self.merge(lst)","repo_name":"aroraakshit/coding_prep","sub_path":"insert_interval.py","file_name":"insert_interval.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"69873646162","text":"import sys\nboard=list(list(map(int, sys.stdin.readline().split())) for _ in range(9))\nempty=[(i, j) for i in range(9) for j in range(9) if board[i][j]==0]\ndef check(r,c):\n num=[1,2,3,4,5,6,7,8,9]\n for i in range(9): # 각 행, 열 검사\n if board[r][i] in num:\n num.remove(board[r][i])\n if board[i][c] in num:\n num.remove(board[i][c])\n r=(r//3)*3\n c=(c//3)*3\n for i in range(r, r+3):\n for j in range(c, c+3):\n if board[i][j] in num:\n num.remove(board[i][j])\n return num\ndef bt(cnt):\n if cnt==len(empty):#더 이상 0이 없음.\n for i in board:\n print(*i)\n sys.exit()\n (r, c)=empty[cnt] #다음 0의 위치 저장\n nums=check(r, c)\n for num in nums:\n board[r][c]=num\n bt(cnt+1)\n board[r][c]=0\nbt(0)\n","repo_name":"auddus16/Algorithm_python","sub_path":"백트래킹/2580_스도쿠.py","file_name":"2580_스도쿠.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10794725853","text":"from django.shortcuts import render, redirect\nfrom .forms import userform\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom .models import user\n\n# Create your views here.\n@login_required(login_url='login')\ndef index(request):\n data = user.objects.all()\n uid = request.session.get('uid')\n return render(request, 'index.html',{'data':data, 'cid':user.objects.get(id=uid)})\n\ndef login(request):\n if request.method == 'POST':\n useremail = request.POST['email']\n usercity = request.POST['city']\n usermod = user.objects.filter(email=useremail, city=usercity)\n uid = user.objects.get(email=useremail)\n if usermod:\n print(\"login done\")\n request.session['email']=useremail\n request.session['uid'] = uid.id\n \n return redirect('index')\n else:\n print(usermod.errors)\n return render(request, 'login.html')\n\ndef updatadat(request, stid):\n id = user.objects.get(id=stid)\n if request.method == 'POST':\n updatauser = userform(request.POST)\n if updatauser.is_valid():\n updatauser = userform(request.POST, instance=id)\n updatauser.save()\n return redirect('index')\n else:\n print(updatauser.errors)\n return render(request, 'updatadat.html',{'stdata':user.objects.get(id=stid)})\n\ndef useradd(request):\n if request.method == 'POST':\n userdata = userform(request.POST)\n if userdata.is_valid():\n userdata.save()\n return redirect('login')\n else:\n print(userform.errors)\n return render(request, 'useradd.html')\n\ndef deletedata(request, stid):\n id = user.objects.get(id=stid)\n \n user.delete(id)\n return redirect('index')\n\ndef userlogout(request):\n logout(request)\n return redirect(\"login\")\n\ndef searchname(request):\n if request.method == 'GET':\n query = request.GET.get('query')\n \n \n if query:\n username = user.objects.filter(name__icontains=query)\n \n return render(request, 'searchname.html', {'usernameof':username})\n else:\n print(\"No information to show\")\n return render(request, 'searchname.html', {})","repo_name":"manavgathani/05July_Manav_Python","sub_path":"Django 
Project/IPtest/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18565237485","text":"from PyQt6.QtWidgets import QCheckBox\nfrom PyQt6.QtGui import QColor, QBrush, QPaintEvent, QPen, QPainter\nfrom PyQt6.QtCore import Qt, QSize, QPoint, QPointF, QRectF, QEasingCurve, QPropertyAnimation, QSequentialAnimationGroup, pyqtSlot, pyqtProperty\n# credits to https://www.pythonguis.com/tutorials/pyqt6-animated-widgets/ \nclass AnimatedToggle(QCheckBox):\n\n _transparent_pen = QPen(Qt.GlobalColor.transparent)\n _light_grey_pen = QPen(Qt.GlobalColor.lightGray)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._bar_color = Qt.GlobalColor.lightGray\n self._handle_color = Qt.GlobalColor.white\n self._checked_color = \"#007bff\" \n self._pulse_unchecked_color = \"#AFAFAF\" \n self._pulse_checked_color = \"#AFAFAF\"\n\n self._bar_brush = QBrush(self._bar_color)\n self._bar_checked_brush = QBrush(QColor(self._checked_color).lighter())\n self._handle_brush = QBrush(self._handle_color)\n self._handle_checked_brush = QBrush(QColor(self._checked_color))\n self._pulse_unchecked_animation = QBrush(QColor(self._pulse_unchecked_color))\n self._pulse_checked_animation = QBrush(QColor(self._pulse_checked_color))\n\n self.setContentsMargins(8, 0, 8, 0)\n self._handle_position = 0\n self._pulse_radius = 0\n\n self.animation = QPropertyAnimation(self, b\"handle_position\", self)\n self.animation.setEasingCurve(QEasingCurve.Type.InOutCubic)\n self.animation.setDuration(200)\n\n self.pulse_anim = QPropertyAnimation(self, b\"pulse_radius\", self)\n self.pulse_anim.setDuration(350)\n self.pulse_anim.setStartValue(10)\n self.pulse_anim.setEndValue(20)\n\n self.animations_group = QSequentialAnimationGroup()\n self.animations_group.addAnimation(self.animation)\n self.animations_group.addAnimation(self.pulse_anim)\n\n self.stateChanged.connect(self.setup_animation)\n\n def sizeHint(self):\n return QSize(58, 45)\n\n def hitButton(self, pos: QPoint):\n return self.contentsRect().contains(pos)\n\n @pyqtSlot(int)\n def setup_animation(self, value):\n self.animations_group.stop()\n if value:\n self.animation.setEndValue(1)\n else:\n self.animation.setEndValue(0)\n self.animations_group.start()\n\n def paintEvent(self, e: QPaintEvent):\n contRect = self.contentsRect()\n handleRadius = round(0.24 * contRect.height())\n\n p = QPainter(self)\n p.setRenderHint(QPainter.RenderHint.Antialiasing)\n\n p.setPen(self._transparent_pen)\n barRect = QRectF(\n 0, 0,\n contRect.width() - handleRadius, 0.40 * contRect.height()\n )\n barRect.moveCenter(QPointF(contRect.center()))\n rounding = barRect.height() / 2\n\n trailLength = contRect.width() - 2 * handleRadius\n\n xPos = contRect.x() + handleRadius + trailLength * self._handle_position\n\n if self.pulse_anim.state() == QPropertyAnimation.State.Running:\n p.setBrush(\n self._pulse_checked_animation if\n self.isChecked() else self._pulse_unchecked_animation)\n p.drawEllipse(QPointF(xPos, barRect.center().y()),\n self._pulse_radius, self._pulse_radius)\n\n if self.isChecked():\n p.setBrush(self._bar_checked_brush)\n p.drawRoundedRect(barRect, rounding, rounding)\n p.setBrush(self._handle_checked_brush)\n else:\n p.setBrush(self._bar_brush)\n p.drawRoundedRect(barRect, rounding, rounding)\n p.setPen(self._light_grey_pen)\n p.setBrush(self._handle_brush)\n\n p.drawEllipse(\n QPointF(xPos, barRect.center().y()),\n handleRadius, 
handleRadius)\n\n p.end()\n\n # Setter methods for color properties\n def setBarColor(self, color):\n self._bar_color = color\n self._bar_brush = QBrush(color)\n self.update()\n\n def setHandleColor(self, color):\n self._handle_color = color\n self._handle_brush = QBrush(color)\n self.update()\n\n def setCheckedColor(self, color):\n \n self._checked_color = color\n self._bar_checked_brush = QBrush(QColor(color).lighter())\n self._handle_checked_brush = QBrush(QColor(color))\n self.update()\n\n def setPulseUncheckedColor(self, color):\n self._pulse_unchecked_color = color\n self._pulse_unchecked_animation = QBrush(QColor(color))\n self.update()\n\n def setPulseCheckedColor(self, color):\n self._pulse_checked_color = color\n self._pulse_checked_animation = QBrush(QColor(color))\n self.update()\n\n @pyqtProperty(float)\n def handle_position(self):\n return self._handle_position\n\n @handle_position.setter\n def handle_position(self, pos):\n self._handle_position = pos\n self.update()\n\n @pyqtProperty(float)\n def pulse_radius(self):\n return self._pulse_radius\n\n @pulse_radius.setter\n def pulse_radius(self, pos):\n self._pulse_radius = pos\n self.update()\n","repo_name":"rakshith111/Time-Stack","sub_path":"src/libs/QClasses/QToggle.py","file_name":"QToggle.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73749105362","text":"# app.py\n# v2 - 10:34pm\nfrom flask import Flask, json, request, jsonify, Response, render_template\nfrom pprint import pprint\nimport pymongo\nfrom bson.json_util import dumps, loads\nimport numpy as np\nimport pandas as pd\n\napp = Flask(__name__)\n\nglobal_df = pd.DataFrame()\n\ndef word_count(input):\n counts = dict()\n \n for word in input:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n return counts\n\n@app.route(\"/api/test/\")\ndef test():\n # return \"\"\"
route works
\"\"\"\n # set up database connection\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n # query db\n testoutput = mongo_collection.find({},{ \"_id\": 1, \"Hashtags(#)\": 1, \"Identity\": \"Akshay Kumar\"}).limit(10)\n # turn into JSON\n testoutput_listcursor = list(testoutput)\n json_data = dumps(testoutput_listcursor, indent=2)\n return json_data\n\n@app.route(\"/api/dynamictest/\", methods=['GET'])\ndef dynamictest():\n # return \"\"\"
route works
\"\"\"\n # get twitter identity from API URL query / call \n QueryIdentity = request.args.get(\"identity\", None)\n print(f\"got name {QueryIdentity}\")\n if QueryIdentity.startswith('\"') and QueryIdentity.endswith('\"'):\n QueryIdentity = QueryIdentity[1:-1]\n print(f\"revised name {QueryIdentity}\")\n # set up database connection\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n # query db\n testoutput = mongo_collection.find( { \"Identity\": QueryIdentity },{ \"_id\": 0, \"Hashtags(#)\": 1} ).limit(20)\n # turn into JSON\n testoutput_listcursor = list(testoutput)\n print(testoutput_listcursor)\n json_data = dumps(testoutput_listcursor, indent=2)\n return json_data\n # stringreturn = \"
Name Is \" + QueryIdentity + \"

\"\n # return stringreturn\n\n\n@app.route(\"/api/wordcloud/\", methods=['GET'])\ndef wordcloud():\n # get twitter identity from API URL query / call \n QueryIdentity = request.args.get(\"identity\", None)\n print(f\"got name {QueryIdentity}\")\n if QueryIdentity.startswith('\"') and QueryIdentity.endswith('\"'):\n QueryIdentity = QueryIdentity[1:-1]\n print(f\"revised name {QueryIdentity}\")\n # set up database connection\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n # query db\n output = mongo_collection.find( { \"Identity\": QueryIdentity, \"Hashtags(#)\": {\"$ne\" : '[]'} },{ \"_id\": 0, \"Hashtags(#)\": 1} )\n # convert to dataframe\n output_df = pd.DataFrame(list(output))\n HashTagArray = [eval(x) for x in output_df[\"Hashtags(#)\"]]\n flattened_Hashtag_list = []\n for l1 in HashTagArray:\n for l2 in l1:\n # l2 = l2.replace('\"','')\n flattened_Hashtag_list.append(l2)\n hashtag_dict = word_count(flattened_Hashtag_list)\n dict_df = pd.DataFrame()\n dict_df = pd.DataFrame(list(hashtag_dict.items()),columns = ['text','size']) \n dict_df.sort_values(['size'], ascending=False, inplace=True)\n dict_df = dict_df.head(250)\n # x = hashtag_dict\n # sorted_hashtag_dict = {k: v for k, v in sorted(x.items(), key=lambda item: item[1], reverse=True)}\n # sorted_df = pd.DataFrame(sorted_hashtag_dict,columns=[['Hashtag','Count']])\n return Response(dict_df.to_json(orient=\"records\"), mimetype='application/json')\n\n@app.route(\"/api/identitylist/\")\ndef list_identities():\n Identity_List = ['Akshay Kumar', 'Amitabh Bachchan', 'Ariana Grande', 'Barack Obama', 'BBC', 'Bill Gates', 'Britney Spears', 'Bruno Mars', 'CNN Breaking', 'CNN', 'Cristiano Ronaldo', 'Donald Trump', 'Drake', 'ESPN', 'FC Barcelona', 'Harry Styles', 'Instagram', 'Jimmy Fallon', 'J Lo', 'Justin Bieber', 'Justin Timberlake', 'Katy Perry', 'Kevin Hart', 'Kim Kardashian', 'Lady Gaga', 'Le Bron James', 'Liam Payne', 'Lil Wayne', 'Louis Tomlinson', 'Miley Cyrus', 'Narendra Modi', 'NASA', 'Neymar Jr', 'Niall Horan', 'NY Times', 'Oprah', 'Pink', 'Real Madrid', 'Rihanna', 'Salman Khan', 'Selena Gomez', 'Shah Rukh Khan', 'Shakira', 'Sports Center', 'Taylor Swift', 'The Ellen Show', 'Twitter', 'Virat Kohli', 'Wiz Khalifa', 'Youtube']\n Identities_df = pd.DataFrame(Identity_List,columns=['Identities'])\n return Response(Identities_df.to_json(orient=\"records\"), mimetype='application/json')\n\n\n@app.route('/getmsg/', methods=['GET'])\ndef respond():\n # Retrieve the name from url parameter\n name = request.args.get(\"name\", None)\n\n # For debugging\n print(f\"got name {name}\")\n\n response = {}\n\n # Check if user sent a name at all\n if not name:\n response[\"ERROR\"] = \"no name found, please send a name.\"\n # Check if the user entered a number not a name\n elif str(name).isdigit():\n response[\"ERROR\"] = \"name can't be numeric.\"\n # Now the user entered a valid name\n else:\n response[\"MESSAGE\"] = f\"Welcome {name} to our awesome platform!!\"\n\n # Return the response in json format\n return jsonify(response)\n\n\n\n@app.route(\"/api/average/\", methods=['GET'])\ndef average():\n # set\n QueryName = request.args.get(\"name\",None)\n print(f\"got name {QueryName}\")\n if QueryName.startswith('\"\"') and QueryName.endswith('\"\"'):\n QueryName = QueryName[2:-2]\n print(f\"revised name {QueryName}\")\n # set updatabase connection\n client = 
pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n #testoutput = mongo_collection.find( { \"Identity\": QueryName }).sort([(\"Likes\",-1)]).limit(1) \n testoutput = mongo_collection.aggregate([{ '$match': { 'Identity': QueryName } },{ '$group': { '_id': 1, 'average': { '$avg': \"$Likes\" } } }])\n # turn into JSONJSON\n print(f\"output {testoutput}\")\n testoutput_listcursor = list(testoutput)\n print(testoutput_listcursor)\n json_data = dumps(testoutput_listcursor, indent=2)\n return json_data\n # query db\n\n\n@app.route(\"/api/average/daily/\", methods=['GET'])\ndef averageDaily():\n # set\n QueryName = request.args.get(\"name\",None)\n print(f\"got name {QueryName}\")\n if QueryName.startswith('\"\"') and QueryName.endswith('\"\"'):\n QueryName = QueryName[2:-2]\n print(f\"revised name {QueryName}\")\n # set\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n #testoutput = mongo_collection.find( { \"Identity\": QueryName }).sort([(\"Likes\",-1)]).limit(1) \n testoutput = mongo_collection.aggregate([{ '$match': { 'Identity': QueryName } },{ '$group': { '_id': 1, 'average': { '$avg': \"$Likes\" } } }])\n # turn into JSONJSON\n print(f\"output {testoutput}\")\n testoutput_listcursor = list(testoutput)\n print(testoutput_listcursor)\n json_data = dumps(testoutput_listcursor, indent=2)\n return json_data\n # query db\n\n@app.route(\"/api/dashboard/\", methods=['GET'])\ndef dashboard():\n # set up pymongo connection\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n\n # set QueryIdentity from API GET, clean out commas\n QueryIdentity = request.args.get(\"name\",None)\n print(f\"got name {QueryIdentity}\")\n if QueryIdentity.startswith('\"') and QueryIdentity.endswith('\"'):\n QueryIdentity = QueryIdentity[1:-1]\n print(f\"revised name {QueryIdentity}\")\n \n # Query Mongo, turn into Dataframe \n identity_df = pd.DataFrame(list(mongo_collection.find({\"Identity\": QueryIdentity})))\n\n # Read In Date / Time \n identity_df['Time'] = pd.to_datetime(identity_df[\"Time\"],format='%H:%M:%S')\n identity_df[\"Date\"] = pd.to_datetime(identity_df[\"Date\"],format=\"%Y-%m-%d\")\n\n # Group DF by Month, get stats \n identity_groupby_month_df = identity_df.groupby(pd.Grouper(key='Date', freq='M'))\n AvgTweetsPerMonth = round(identity_groupby_month_df['Tweet Id'].count().mean())\n AvgLikesPerMonth = round(identity_groupby_month_df['Likes'].sum().mean())\n AvgReTweetsPerMonth = round(identity_groupby_month_df['Retweets'].sum().mean())\n AvgAtMentionsPerMonth = round(identity_groupby_month_df['Total @'].sum().mean())\n AvgHashtagsPerMonth = round(identity_groupby_month_df['Total #'].sum().mean())\n\n # Group DF by Day, get stats \n identity_groupby_day_df = identity_df.groupby(pd.Grouper(key='Date', freq='D'))\n AvgTweetsPerDay = round(identity_groupby_day_df['Tweet Id'].count().mean())\n AvgLikesPerDay = round(identity_groupby_day_df['Likes'].sum().mean())\n AvgReTweetsPerDay = round(identity_groupby_day_df['Retweets'].sum().mean())\n AvgAtMentionsPerDay = round(identity_groupby_day_df['Total @'].sum().mean())\n AvgHashtagsPerDay = round(identity_groupby_day_df['Total 
#'].sum().mean())\n\n # Get Totals Stats \n TotalTweets = identity_df['Tweet Id'].count()\n TotalLikes = identity_df['Likes'].sum()\n TotalReTweets = identity_df['Retweets'].sum()\n TotalAtMentions = identity_df['Total @'].sum()\n TotalHashtags = identity_df['Total #'].sum()\n\n # Create Scatterplot DF & JSON it\n scatter_df = identity_df[['Likes','Retweets']]\n # scatter_json = scatter_df.to_json(orient=\"records\")\n\n # Combine Stats into a Dictionary\n Tweet_Data_Stats = {\n \"TotalTweets\": int(TotalTweets),\n \"TotalLikes\": int(TotalLikes),\n \"TotalReTweets\": int(TotalReTweets),\n \"TotalAtMentions\": int(TotalAtMentions),\n \"TotalHashtags\": int(TotalHashtags),\n \"AvgTweetsPerDay\": AvgTweetsPerDay,\n \"AvgLikesPerDay\": AvgLikesPerDay,\n \"AvgReTweetsPerDay\": AvgReTweetsPerDay,\n \"AvgAtMentionsPerDay\": AvgAtMentionsPerDay,\n \"AvgHashtagsPerDay\": AvgHashtagsPerDay,\n \"AvgTweetsPerMonth\": AvgTweetsPerMonth,\n \"AvgLikesPerMonth\": AvgLikesPerMonth,\n \"AvgReTweetsPerMonth\": AvgReTweetsPerMonth,\n \"AvgAtMentionsPerMonth\": AvgAtMentionsPerMonth,\n \"AvgHashtagsPerMonth\": AvgHashtagsPerMonth\n }\n\n # Copy Dataframe into Global for other function\n global_df = identity_df.copy()\n\n # Combine Two Dictionaries\n # Tweet_Data_All = [Tweet_Data_Stats,scatter_json]\n \n # Return \n tweet_json_data = dumps(Tweet_Data_Stats, indent=2)\n return tweet_json_data\n\n\n@app.route(\"/api/dashboard/scatter/\", methods=['GET'])\ndef scatterResults():\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Combined_Tweets\"]\n \n # set QueryIdentity from API GET, clean out commas\n QueryIdentity = request.args.get(\"name\",None)\n print(f\"got name {QueryIdentity}\")\n if QueryIdentity.startswith('\"') and QueryIdentity.endswith('\"'):\n QueryIdentity = QueryIdentity[1:-1]\n print(f\"revised name {QueryIdentity}\")\n \n scatter_df = pd.DataFrame(list(mongo_collection.find({\"Identity\": QueryIdentity} ,{ \"_id\": 0, \"Likes\": 1, \"Retweets\" : 1} )))\n scatter_json = scatter_df.to_json(orient=\"records\")\n # scatter_json = dumps(scatter_df)\n\n return scatter_json\n\n\n# @app.route(\"/api/dashboard-old/\", methods=['GET'])\n# def dashboardOld():\n# # set\n# QueryName = request.args.get(\"name\",None)\n# print(f\"got name {QueryName}\")\n# if QueryName.startswith('\"') and QueryName.endswith('\"'):\n# QueryName = QueryName[1:-1]\n# print(f\"revised name {QueryName}\")\n# # set\n# client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n# mongo_db = client[\"Tweets_DB\"]\n# mongo_collection = mongo_db[\"Combined_Tweets\"]\n# #testoutput = mongo_collection.find( { \"Identity\": QueryName }).sort([(\"Likes\",-1)]).limit(1)\n# average = mongo_collection.aggregate([{ '$match': { 'Identity': QueryName } },{ '$group': { '_id': 1, 'average': { '$avg': \"$Likes\" } } }])\n# totalLikes = mongo_collection.aggregate([{ '$match': { 'Identity': QueryName } },{ '$group': { '_id': 1, 'total': { '$sum': \"$Likes\" } } }])\n# totalRetweets = mongo_collection.aggregate([{ '$match': { 'Identity': QueryName } },{ '$group': { '_id': 1, 'total': { '$sum': \"$Retweets\" } } }])\n# # turn into JSONJSON\n# responseAverage = list(average)\n# responseTotalLikes = list(totalLikes)\n# responsetotalRetweets=list(totalRetweets)\n# print(f\"average {responseAverage} \")\n# print(f\"totalLikes {responseTotalLikes}\")\n# 
print(f\"totalRetweets{responsetotalRetweets}\")\n# json_average = dumps(responseAverage, indent=2)\n# json_totalLikes = dumps(responseTotalLikes, indent=2)\n# json_totalRetweets = dumps(responsetotalRetweets, indent=2)\n# response = jsonify(json_average,json_totalLikes,json_totalRetweets)\n# return response\n\n# A welcome message to test our server\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/api/ARC_Diagram/')\ndef arcdiagram():\n client = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n mongo_db = client[\"Tweets_DB\"]\n mongo_collection = mongo_db[\"Arc_Diagram\"]\n\n ARC_output = mongo_collection.find_one( {} )\n\n for entry in ARC_output:\n print(entry)\n\n arc_json = dumps(ARC_output, indent=2)\n\n return arc_json\n\n@app.route('/ARC_Diagram/')\ndef arcindex():\n return render_template(\"arc_index.html\")\n\n@app.route('/wordcloudpage/')\ndef wordcloudpage():\n return render_template(\"wordcloud.html\")\n\n# myclient = pymongo.MongoClient(\"mongodb+srv://AtlasTwitter:1FineTwitterApp!@twittercluster.ycq9k.mongodb.net/\")\n# mydb = myclient[\"testDB\"]\n# mycol = mydb[\"test\"]\n# testresults = mycol.find()\n# testlist = []\n# for x in testresults:\n# testlist.append(x)\n# print(x)\n\n# htmlstring = \"\"\"\n#
Welcome to our server !!\n# Created by Twitterazi: Dave
\n# \"TwitterIcon\"
\n# \"\"\"\n# returnstring = htmlstring + str(testlist)\n\nif __name__ == '__main__':\n # Threaded option to enable multiple instances for multiple user access support\n app.run(threaded=True, port=5000)","repo_name":"dhsharp1983/TwitterUsers_v2","sub_path":"Deleteme/FlaskApp_v1.py","file_name":"FlaskApp_v1.py","file_ext":"py","file_size_in_byte":14674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22186283981","text":"import pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nm = 500\n\nruns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\nT_u, T_a, T_c, T_d, T_o = [], [], [], [], []\n\nfor i in runs:\n regret_df = pd.read_csv('regrets_' + str(i) + '.csv', index_col=0, header=None)\n regret_df.index = range(5)\n print(regret_df.sum(axis=1))\n regret_df = regret_df.loc[[0, 1, 2, 3, 4]]\n regret_df = (1000 * regret_df)/regret_df.sum(axis=1)[0]\n\n # print(i)\n # print(regret_df.sum(axis=1))\n\n T_u.append(list(regret_df.loc[0]))\n T_a.append(list(regret_df.loc[1]))\n T_c.append(list(regret_df.loc[2]))\n T_d.append(list(regret_df.loc[3]))\n T_o.append(list(regret_df.loc[4]))\n\nT_u = pd.DataFrame(T_u)\nT_a = pd.DataFrame(T_a)\nT_c = pd.DataFrame(T_c)\nT_d = pd.DataFrame(T_d)\nT_o = pd.DataFrame(T_o)\n\nT_u = T_u.cumsum(axis=1).to_numpy()\nT_a = T_a.cumsum(axis=1).to_numpy()\nT_c = T_c.cumsum(axis=1).to_numpy()\nT_d = T_d.cumsum(axis=1).to_numpy()\nT_o = T_o.cumsum(axis=1).to_numpy()\n\nfor l in T_u:\n for i in range(len(l)):\n l[i] = l[i]/(i + 1)\n\nfor l in T_a:\n for i in range(len(l)):\n l[i] = l[i]/(i + 1)\n\nfor l in T_c:\n for i in range(len(l)):\n l[i] = l[i]/(i + 1)\n\nfor l in T_d:\n for i in range(len(l)):\n l[i] = l[i]/(i + 1)\n\nfor l in T_o:\n for i in range(len(l)):\n l[i] = l[i]/(i + 1)\n\nT_u = pd.DataFrame(T_u)\nT_a = pd.DataFrame(T_a)\nT_c = pd.DataFrame(T_c)\nT_d = pd.DataFrame(T_d)\nT_o = pd.DataFrame(T_o)\n\nmatplotlib.rcParams.update({'font.size': 20})\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\nquantile25_u = T_u.quantile(0.25, axis=0)\nquantile25_a = T_a.quantile(0.25, axis=0)\nquantile25_c = T_c.quantile(0.25, axis=0)\nquantile25_d = T_d.quantile(0.25, axis=0)\nquantile25_o = T_o.quantile(0.25, axis=0)\n\nquantile75_u = T_u.quantile(0.75, axis=0)\nquantile75_a = T_a.quantile(0.75, axis=0)\nquantile75_c = T_c.quantile(0.75, axis=0)\nquantile75_d = T_d.quantile(0.75, axis=0)\nquantile75_o = T_o.quantile(0.75, axis=0)\n\naverage_u = T_u.mean(axis=0)\naverage_a = T_a.mean(axis=0)\naverage_c = T_c.mean(axis=0)\naverage_d = T_d.mean(axis=0)\naverage_o = T_o.mean(axis=0)\n\nplt.title(r'Regret-over-time for OMD-AFW variants')\nplt.xlabel(r'Iterations')\nplt.ylabel(r'Regret/time (normzalied)')\n\n# plt.xscale('log')\n# plt.yscale('log')\n\n# plt.ylim(ymin=10, ymax=1200)\n# plt.xlim(xmin=0, xmax=500)\n\nplt.fill_between(range(m), quantile25_u, quantile75_u, color='green', alpha=0.1)\nplt.fill_between(range(m), quantile25_a, quantile75_a, color='red', alpha=0.1)\nplt.fill_between(range(m), quantile25_c, quantile75_c, color='blue', alpha=0.1)\nplt.fill_between(range(m), quantile25_d, quantile75_d, color='orange', alpha=0.1)\nplt.fill_between(range(m), quantile25_o, quantile75_o, color='orange', alpha=0.1)\n\naverage_u_plot, = plt.plot(average_u, color='green', label='OMD-UAFW')\naverage_a_plot, = plt.plot(average_a, color='red', label='OMD-ASAFW')\naverage_c_plot, = plt.plot(average_c, color='blue', label='OMD-TSAFW')\naverage_d_plot, = plt.plot(average_d, 
color='orange', label='OMD-A$^2$FW')\naverage_o_plot, = plt.plot(average_o, color='cyan', label='OFW')\n\nplt.tight_layout()\n\nplt.legend(loc='lower right')\nplt.savefig('regret_over_time.png', dpi=300)\nplt.show()\n","repo_name":"jaimoondra/submodular-polytope-projections","sub_path":"code/plot_omd_regret_over_time.py","file_name":"plot_omd_regret_over_time.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4044921290","text":"from django.core import serializers\n\nfrom dajax.core import Dajax\nfrom dajaxice.decorators import dajaxice_register\n\nfrom .models import Position\n\n\n@dajaxice_register\ndef setSequences(request, value):\n dajax = Dajax()\n\n try:\n sequences = Position.objects.get(pk=value).sequence_set.all().order_by('sequence_num')\n json = serializers.serialize(\"json\", sequences)\n dajax.add_data(json, 'setSequences')\n except ValueError:\n dajax.add_data(None, 'setSequences')\n\n return dajax.json()\n\n\n@dajaxice_register\ndef noargsTest(request):\n dajax = Dajax()\n\n dajax.alert(\"No arguments!\")\n\n return dajax.json()\n","repo_name":"SevenDeadlySins/ssu-apps","sub_path":"ssu_apps/key_control/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25351547965","text":"import unittest\nimport numpy as np\n\nclass TestSinEmbedding(unittest.TestCase):\n def test_embedding(self):\n from aprec.recommenders.sequential.models.sasrec.sasrec import ExpPositionEncoding, SinePositionEncoding\n sinEncoder = SinePositionEncoding(50, 64)\n input = np.array([[0, 1, 2, 3],[1,2,3,4]])\n encoded = sinEncoder(input)\n self.assertEqual(encoded.shape, (2, 4, 64))\n\n expEncoder = ExpPositionEncoding(50, 64)\n input = np.array([[0, 1, 2, 3],[1,2,3,4]])\n encoded = expEncoder(input)\n self.assertEqual(encoded.shape, (2, 4, 64))\n\nif __name__== \"__main__\":\n unittest.main()","repo_name":"asash/gsasrec","sub_path":"tests/recommenders/sequential/sasrec/test_positional_encoding.py","file_name":"test_positional_encoding.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"73573199762","text":"import os\nfrom shutil import copyfile\n\ndef process_directory(directory, function, file_ext=None):\n for entry in os.listdir(directory):\n path = os.path.join(directory, entry)\n if os.path.isdir(path):\n process_directory(path, function, file_ext)\n elif file_ext == None or path.lower().endswith(file_ext.lower()):\n function(path)\n\ndef copy_file(file_path):\n file_path = file_path.replace(\"\\\\\", \"/\")\n file_name = file_path[file_path.rfind(\"/\")+1:]\n copyfile(file_path, \"Flat/%s\" % file_name)\n\nprocess_directory(\"SegmentedPlants\", copy_file)\n","repo_name":"bitsauce/CSE291-I00_ML_on_3D_Data","sub_path":"LabelMeshes/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34649495138","text":"# https://gigabaseorgigabyte.wordpress.com/2017/08/08/a-twitter-bot-to-find-the-most-interesting-biorxiv-preprints/\nimport logging\nfrom altmetric import Altmetric, AltmetricHTTPException\n\nlogger = logging.getLogger(__name__)\n\ndef queryAltmetric(pmid):\n # Check the altmetric journal percentile score of the publication\n a = Altmetric()\n 
try:\n        resp = a.pmid(pmid)\n        if resp is None:\n            logger.debug(\"PMID %s. Not found\" % pmid)\n            return -1\n        else:\n            if 'context' in resp:\n                metric = resp['context']['journal']['pct'] # Percentage attention for this journal\n                logger.debug(\"PMID %s. Metric %s\" % (pmid, metric))\n                return metric\n            logger.debug(\"PMID %s. Percentage attention not found\" % pmid)\n            return -2\n    except AltmetricHTTPException as e:\n        if e.status_code == 403:\n            logger.error(\"You aren't authorized for this call: {}\".format(pmid))\n        elif e.status_code == 420:\n            logger.error('You are being rate limited, currently {}'.format(pmid))\n        elif e.status_code == 502:\n            logger.error('The API version you are using is currently down for maintenance.')\n        elif e.status_code == 404:\n            logger.error('Invalid API function')\n            logger.error(e.msg)\n        logger.warning(\"PMID %s. Exception %s\" % (pmid, e.msg))\n        return -3\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO)\n    queryAltmetric('29554097')\n    queryAltmetric('29545237')\n    queryAltmetric('29552423')\n\n","repo_name":"wenwei-dev/PubMedMetrics","sub_path":"pubmedmetrics/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"26029021607","text":"# -*- coding:utf-8 -*-\nimport sys\nimport numpy as np\nimport pandas as pd\nimport os\nn_gram = int(sys.argv[1])\nprint(\"n = \", n_gram)\nkind_pth = r\"F:\\my_project\\apk_sample\\kind\"\ncntKind = len(os.listdir(kind_pth))\n\ndef sigmoid(z):\n    return 1.0 / (1.0 + np.exp(-z))\n\n\ndef dsigmoid(z):\n    return sigmoid(z) * (1.0 - sigmoid(z))\n\n\ndef data_process(dataPath):\n    global xTrain, yTrain\n    xFrame = pd.read_csv(dataPath, usecols=np.arange(0,7**n_gram).tolist())\n    yFrame = pd.read_csv(dataPath, usecols=[7**n_gram])\n    xTrain = np.array(xFrame).astype(int)\n    yTrain = np.array(yFrame).astype(int)\n    # print(yTrain.shape) # (222, 1)\n    return xTrain, yTrain\n\n\ndef logistics_regression(xTrain, yTrain):\n    weight = np.zeros(len(xTrain[0]))\n    # print(weight)\n    bias = 0.0\n    learning_rate = 0.0001\n    epochs = 1000\n    w_sum = np.zeros(len(xTrain[0]))\n    b_sum = 0.0\n\n    for i in range(epochs):\n        # accumulate the gradient over one full pass of the training data\n        w_grad = np.zeros(len(xTrain[0]))\n        b_grad = 0.0\n        for j in range(len(xTrain)):\n            w_grad += (-xTrain[j]).T * (yTrain[j]-sigmoid(xTrain[j].T.dot(weight)+bias))\n            b_grad += (-1) * (yTrain[j]-sigmoid(xTrain[j].T.dot(weight)+bias))\n        w_grad /= len(xTrain)\n        b_grad /= len(xTrain)\n        # adagrad\n        w_sum += w_grad ** 2\n        b_sum += b_grad ** 2\n        # gradient descent\n        weight -= learning_rate/np.sqrt(w_sum) * w_grad\n        bias -= learning_rate/np.sqrt(b_sum) * b_grad\n    # print(weight)\n    # print(bias)\n    return weight, bias\n\n\ndef test_model(weight, bias):\n    # show the weight and bias\n    # print(weight)\n    # print(bias)\n    # test model\n    res = []\n    for i in range(len(xTrain)):\n        # output from the model\n        tmp = xTrain[i].T.dot(weight) + bias\n        if sigmoid(tmp) >= 0.5:\n            res.append(1)\n        else:\n            res.append(0)\n    sumOfKind, sumOfVirus = 0., 0.\n    for i in range(cntKind):\n        if res[i] == yTrain[i]:\n            sumOfKind += 1\n    for i in np.arange(cntKind, len(yTrain)):\n        if res[i] == yTrain[i]:\n            sumOfVirus += 1\n    print('accuracy in testing data')\n    print('accuracy in kind sample: %.3f' % (100 * sumOfKind / cntKind) + '%')\n    # print(len(yTrain)) # packed APKs fail to decompile, so fewer samples are actually obtained than were collected\n    print('accuracy in virus sample: %.3f' % (100 * sumOfVirus / (len(yTrain)-cntKind)) + '%')\n\n\ndef main():\n    dataPath = str(n_gram)+'_gram.csv'\n    x, y = 
data_process(dataPath)\n weight, bias = logistics_regression(x, y)\n test_model(weight, bias)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ACEVERYDAY/MalwareDetection_","sub_path":"n-gramTool/logisticsRegression.py","file_name":"logisticsRegression.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"10639728054","text":"import yfinance as yf\nimport matplotlib.pyplot as plt\n\ndef moving_averages(tickers):\n # Loop through each stock\n buy_list = []\n sell_list = []\n for ticker in tickers:\n print(f'Processing {ticker}')\n\n # Run 5 year algorithm\n data = yf.download(ticker, period='5y', interval='1d')\n data['Open_avg_200'] = data['Open'].rolling(window=200).mean()\n data['Open_avg_50'] = data['Open'].rolling(window=50).mean()\n\n if data['Open_avg_50'][-1] > data['Open_avg_200'][-1]:\n buy_list.append(ticker)\n else:\n sell_list.append(ticker)\n\n data.plot(y=['Open', 'Open_avg_50', 'Open_avg_200'], title=ticker)\n plt.savefig(f'images/{ticker}_movingAvg.png')\n\n return buy_list, sell_list","repo_name":"Trevormccants27/Stock_alerts","sub_path":"moving_averages.py","file_name":"moving_averages.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"8517608662","text":"from game5.utilities import *\r\n\r\nclass JuegoRana:\r\n RUTA_RANA = \"game5/assets/renders/RanaP.png\"\r\n RUTA_RANA_INV = \"game5/assets/renders/RanaPInvertida.png\"\r\n RUTA_HOJA = \"game5/assets/renders/NenufarP.png\"\r\n NUM_SECUENCIAS_NIVEL = [3,4,5,6,7,8,9,10,11,12,13,14,15,16]\r\n NUM_SECUENCIAS_NIVEL_PRUEBA = [5,7,8,9,10]\r\n NUM_HOJAS = 5\r\n def __init__(self, tipoJuego):\r\n self.niveles = []\r\n self.hojas = []\r\n self.nivelActual = 0\r\n self.nivelPruebaActual = 0\r\n self.cargarHojas()\r\n if tipoJuego == 0:\r\n self.cargarNivelesPrueba()\r\n else:\r\n self.cargarNiveles()\r\n\r\n def generarResultados(self):\r\n resultadosRondas = \"\"\r\n for nivel in self.niveles:\r\n segundos = nivel.segundos % 60\r\n minutos = int(nivel.segundos / 60)\r\n resultadosRondas += \"\\tRonda \"+str(nivel.numNivel)+\": Aciertos: \"+str(nivel.aciertos)+\", Errores: \"+str(nivel.errores)+\", Tiempo: \"+str(minutos)+\":\"+str(segundos)+\"m.\\n\"\r\n return resultadosRondas\r\n\r\n def obtenerNivel(self):\r\n if self.nivelActual < len(self.niveles):\r\n nivel = self.niveles[self.nivelActual]\r\n self.nivelActual += 1\r\n return nivel\r\n else:\r\n return None\r\n \r\n def cargarHojas(self):\r\n x, y = 190, 0\r\n for hojax in range(self.NUM_HOJAS):\r\n y = 200\r\n hojasFila = []\r\n for hojay in range(self.NUM_HOJAS):\r\n hojasFila.append(Hoja(x,y, self.RUTA_HOJA))\r\n y += 100\r\n self.hojas.append(hojasFila)\r\n x += 90\r\n\r\n def cargarNivelesPrueba(self):\r\n index = 0\r\n \r\n for iNivelPrueba in range(0, len(self.NUM_SECUENCIAS_NIVEL_PRUEBA)):\r\n camino = []\r\n rndInicial = numeroAleatorio(0, self.NUM_HOJAS-1)\r\n hojaInicial = self.hojas[0][rndInicial]\r\n rndFinal = numeroAleatorio(0, self.NUM_HOJAS-1)\r\n hojaFinal = self.hojas[self.NUM_HOJAS -1][rndFinal]\r\n termino = False\r\n camino.append(hojaInicial)\r\n numHojasNivel = self.NUM_SECUENCIAS_NIVEL_PRUEBA[iNivelPrueba]\r\n while not termino:\r\n x, y = numeroAleatorio(1, self.NUM_HOJAS-2), numeroAleatorio(0, self.NUM_HOJAS-1)\r\n miHoja = self.hojas[x][y]\r\n if len(camino) == numHojasNivel-1:\r\n termino = True\r\n elif miHoja not in camino:\r\n 
camino.append(miHoja)\r\n camino.append(hojaFinal)\r\n self.niveles.append(Nivel(iNivelPrueba, camino, Nivel.TIPO_NIVEL_PROGRESIVO))\r\n index = iNivelPrueba\r\n\r\n index += 1\r\n \r\n for iNivelPrueba in range(0, len(self.NUM_SECUENCIAS_NIVEL_PRUEBA)):\r\n nivelPrueba = self.niveles[iNivelPrueba]\r\n newArray = nivelPrueba.camino[::-1]\r\n self.niveles.append(Nivel(index, newArray, Nivel.TIPO_NIVEL_REGRESIVO))\r\n index +=1\r\n\r\n def cargarNiveles(self):\r\n index = 0\r\n\r\n for iNivel in range(0, len(self.NUM_SECUENCIAS_NIVEL)):\r\n camino = []\r\n rndInicial = numeroAleatorio(0, self.NUM_HOJAS-1)\r\n hojaInicial = self.hojas[0][rndInicial]\r\n rndFinal = numeroAleatorio(0, self.NUM_HOJAS-1)\r\n hojaFinal = self.hojas[self.NUM_HOJAS -1][rndFinal]\r\n termino = False\r\n camino.append(hojaInicial)\r\n numHojasNivel = self.NUM_SECUENCIAS_NIVEL[iNivel]\r\n while not termino:\r\n x, y = numeroAleatorio(1, self.NUM_HOJAS-2), numeroAleatorio(0, self.NUM_HOJAS-1)\r\n miHoja = self.hojas[x][y]\r\n if len(camino) == numHojasNivel-1:\r\n termino = True\r\n elif miHoja not in camino:\r\n camino.append(miHoja)\r\n camino.append(hojaFinal)\r\n self.niveles.append(Nivel(iNivel, camino, Nivel.TIPO_NIVEL_PROGRESIVO))\r\n index = iNivel\r\n \r\n index += 1\r\n\r\n for iNivel in range(0, len(self.NUM_SECUENCIAS_NIVEL)):\r\n nivel = self.niveles[iNivel]\r\n newArray = nivel.camino[::-1]\r\n self.niveles.append(Nivel(index, newArray, Nivel.TIPO_NIVEL_REGRESIVO))\r\n index +=1\r\n \r\nclass Nivel:\r\n TIPO_NIVEL_REGRESIVO = \"Regresivo\"\r\n TIPO_NIVEL_PROGRESIVO = \"Progresivo\"\r\n def __init__(self, numNivel, camino, tipoNivel):\r\n self.numNivel = numNivel\r\n self.numPaso = 0\r\n self.segundos = 0\r\n self.aciertos = 0\r\n self.errores = 0\r\n self.camino = camino\r\n self.tipoNivel = tipoNivel\r\n self.indiceMovimiento = 0\r\n\r\n def hayMasMovimientos(self):\r\n return self.numPaso < len(self.camino)\r\n\r\n def calcularJugada(self, x, y):\r\n movimiento = self.camino[self.numPaso]\r\n if movimiento.x == x and movimiento.y == y:\r\n self.aciertos += 1\r\n else:\r\n self.errores += 1\r\n self.numPaso += 1\r\n\r\n def obtenerMovimiento(self):\r\n if self.indiceMovimiento < len(self.camino):\r\n movimiento = self.camino[self.indiceMovimiento]\r\n self.indiceMovimiento += 1\r\n return movimiento\r\n else:\r\n return None\r\n\r\n def actualizarSegundos(self, segundos):\r\n self.segundos = segundos\r\n \r\n\r\nclass Hoja:\r\n def __init__(self, x, y, ruta):\r\n self.x = x\r\n self.y = y\r\n self.ruta = ruta","repo_name":"SGarcia710/python-games","sub_path":"game5/JuegoRana.py","file_name":"JuegoRana.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42204718075","text":"from django.urls import include, path\nfrom knox.views import LogoutView\nfrom rest_framework import routers\n\nfrom api.views import (\n BatchView,\n CategoryView,\n CompleteSaleView,\n LoginView,\n ProductView,\n SaleHistoryView,\n register,\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(\"products\", ProductView, \"products\")\nrouter.register(\"categories\", CategoryView, \"categories\")\nrouter.register(\"salehistory\", SaleHistoryView, \"salehistory\")\nrouter.register(\"sales\", CompleteSaleView, \"sales\")\nrouter.register(\"batchs\", BatchView, \"batchs\")\nurlpatterns = [\n path(r\"\", include(router.urls)),\n path(r\"login/\", LoginView.as_view(), name=\"knox_login\"),\n path(r\"logout/\", LogoutView.as_view(), 
name=\"knox_logout\"),\n path(r\"register/\", register),\n]\n","repo_name":"Osc100/bullet_sales_system","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36319753874","text":"import numbers\n\n\ndef pointsEqual(a, b):\n \"\"\"\n checks if 2 [x, y] points are equal\n \"\"\"\n for i in range(0, len(a)):\n if a[i] != b[i]:\n return False\n return True\n\n\ndef closeRing(coordinates):\n \"\"\"\n checks if the first and last points of a ring are equal and closes the ring\n \"\"\"\n if not pointsEqual(coordinates[0], coordinates[len(coordinates) - 1]):\n coordinates.append(coordinates[0])\n return coordinates\n\n\ndef ringIsClockwise(ringToTest):\n \"\"\"\n determine if polygon ring coordinates are clockwise. clockwise signifies\n outer ring, counter-clockwise an inner ring or hole.\n \"\"\"\n\n total = 0\n i = 0\n rLength = len(ringToTest)\n pt1 = ringToTest[i]\n pt2 = None\n for i in range(0, rLength - 1):\n pt2 = ringToTest[i + 1]\n total += (pt2[0] - pt1[0]) * (pt2[1] + pt1[1])\n pt1 = pt2\n\n return (total >= 0)\n\n\ndef vertexIntersectsVertex(a1, a2, b1, b2):\n uaT = (b2[0] - b1[0]) * (a1[1] - b1[1]) - (b2[1] - b1[1]) * (a1[0] - b1[0])\n ubT = (a2[0] - a1[0]) * (a1[1] - b1[1]) - (a2[1] - a1[1]) * (a1[0] - b1[0])\n uB = (b2[1] - b1[1]) * (a2[0] - a1[0]) - (b2[0] - b1[0]) * (a2[1] - a1[1])\n\n if uB != 0:\n ua = uaT / uB\n ub = ubT / uB\n\n if ua >= 0 and ua <= 1 and ub >= 0 and ub <= 1:\n return True\n\n return False\n\n\ndef arrayIntersectsArray(a, b):\n for i in range(0, len(a) - 1):\n for j in range(0, len(b) - 1):\n if vertexIntersectsVertex(a[i], a[i + 1], b[j], b[j + 1]):\n return True\n\n return False\n\n\ndef coordinatesContainPoint(coordinates, point):\n\n contains = False\n l = len(coordinates)\n i = -1\n j = l - 1\n while ((i + 1) < l):\n i = i + 1\n ci = coordinates[i]\n cj = coordinates[j]\n if ((ci[1] <= point[1] and point[1] < cj[1]) or (cj[1] <= point[1] and point[1] < ci[1])) and\\\n (point[0] < (cj[0] - ci[0]) * (point[1] - ci[1]) / (cj[1] - ci[1]) + ci[0]):\n contains = not contains\n j = i\n return contains\n\n\ndef coordinatesContainCoordinates(outer, inner):\n intersects = arrayIntersectsArray(outer, inner)\n contains = coordinatesContainPoint(outer, inner[0])\n if not intersects and contains:\n return True\n return False\n\n\ndef convertRingsToGeoJSON(rings):\n \"\"\"\n do any polygons in this array contain any other polygons in this array?\n used for checking for holes in arcgis rings\n \"\"\"\n\n outerRings = []\n holes = []\n x = None # iterator\n outerRing = None # current outer ring being evaluated\n hole = None # current hole being evaluated\n\n # for each ring\n for r in range(0, len(rings)):\n ring = closeRing(rings[r])\n if len(ring) < 4:\n continue\n\n # is this ring an outer ring? 
is it clockwise?\n if ringIsClockwise(ring):\n polygon = [ring]\n outerRings.append(polygon) # push to outer rings\n else:\n holes.append(ring) # counterclockwise push to holes\n\n uncontainedHoles = []\n\n # while there are holes left...\n while len(holes):\n # pop a hole off out stack\n hole = holes.pop()\n\n # loop over all outer rings and see if they contain our hole.\n contained = False\n x = len(outerRings) - 1\n while (x >= 0):\n outerRing = outerRings[x][0]\n if coordinatesContainCoordinates(outerRing, hole):\n # the hole is contained push it into our polygon\n outerRings[x].append(hole)\n contained = True\n break\n x = x - 1\n\n # ring is not contained in any outer ring\n # sometimes this happens https://github.com/Esri/esri-leaflet/issues/320\n if not contained:\n uncontainedHoles.append(hole)\n\n # if we couldn't match any holes using contains we can try intersects...\n while len(uncontainedHoles):\n # pop a hole off out stack\n hole = uncontainedHoles.pop()\n\n # loop over all outer rings and see if any intersect our hole.\n intersects = False\n x = len(outerRings) - 1\n while (x >= 0):\n outerRing = outerRings[x][0]\n if arrayIntersectsArray(outerRing, hole):\n # the hole is contained push it into our polygon\n outerRings[x].append(hole)\n intersects = True\n break\n x = x - 1\n\n if not intersects:\n outerRings.append([hole[::-1]])\n\n if len(outerRings) == 1:\n return {\n 'type': 'Polygon',\n 'coordinates': outerRings[0]\n }\n else:\n return {\n 'type': 'MultiPolygon',\n 'coordinates': outerRings\n }\n\n\ndef arcgis2geojson(arcgis, idAttribute=None):\n \"\"\"\n Convert an ArcGIS JSON object to a GeoJSON object\n \"\"\"\n\n geojson = {}\n\n if 'x' in arcgis and isinstance(arcgis['x'], numbers.Number) and 'y' in arcgis and isinstance(arcgis['y'], numbers.Number):\n geojson['type'] = 'Point'\n geojson['coordinates'] = [arcgis['x'], arcgis['y']]\n\n if 'points' in arcgis:\n geojson['type'] = 'MultiPoint'\n geojson['coordinates'] = arcgis['points']\n\n if 'paths' in arcgis:\n if len(arcgis['paths']) == 1:\n geojson['type'] = 'LineString'\n geojson['coordinates'] = arcgis['paths'][0]\n else:\n geojson['type'] = 'MultiLineString'\n geojson['coordinates'] = arcgis['paths']\n\n if 'rings' in arcgis:\n geojson = convertRingsToGeoJSON(arcgis['rings'])\n\n if 'geometry' in arcgis or 'attributes' in arcgis:\n geojson['type'] = 'Feature'\n if 'geometry' in arcgis:\n geojson['geometry'] = arcgis2geojson(arcgis['geometry'])\n else:\n geojson['geometry'] = None\n\n if 'attributes' in arcgis:\n geojson['properties'] = arcgis['attributes']\n if idAttribute in arcgis['attributes']:\n geojson['id'] = arcgis['attributes'][idAttribute]\n elif 'OBJECTID' in arcgis['attributes']:\n geojson['id'] = arcgis['attributes']['OBJECTID']\n elif 'FID' in arcgis['attributes']:\n geojson['id'] = arcgis['attributes']['FID']\n else:\n geojson['properties'] = None\n\n if 'geometry' in geojson and not(geojson['geometry']):\n geojson['geometry'] = None\n\n return geojson\n","repo_name":"FireCARES/firecares","sub_path":"firecares/utils/arcgis2geojson.py","file_name":"arcgis2geojson.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"348205003","text":"import ops\nimport utils\n\nbudget = int(input(\"Enter Your Budget: \"))\nchoice = 0\nGroceryList = []\nwhile True:\n choice = ops.getChoice()\n if choice == 1:\n item, check, budget = utils.addItem(budget)\n if check == 200:\n GroceryList = 
utils.alreadyPresent(GroceryList, item.price, item.quantity, item.name)\n        elif check == 406:\n            print(\"No money left\\n\")\n        elif check == 403:\n            print(\"Over price, item not added\\n\")\n    else:\n        utils.printItems(GroceryList)\n        utils.endingBudget(budget, GroceryList)\n        break\n","repo_name":"NavyaVerma/accelerize360-task3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17530246534","text":"import sys\nsys.path.append('/home/parallels/ubuntu_op/Block_chain')\nsys.path.append('/home/parallels/ubuntu_zk/Block_chain')\nfrom functions import *\nexcel_path= '/home/parallels/ubuntu_zk/Block_chain/eth1000_OP_操作后.xlsx'\n\nwrite_success_to_excel_column = \"E\"  # excel column where success/failure is recorded\nread_from_excel_column = \"E\"  # which excel column to read the status from, to decide whether the task still needs doing\nexcel_start_row = 2\nbrowser_wait_times = 15\n\nwhile 1:  # remember to delete the corresponding txt lines afterwards\n    for i in range(2, 101):\n        success_or_fail = Do_Excel(excel_path,sheetname='SheetJS').read(i, read_from_excel_column)\n        if success_or_fail != \"成功\":\n            print(f\"account {i} still needs to mint the NFT\")\n            try:\n                ##=========== prepare the browser\n                wait, browser = my_linux_chrome(time_out = browser_wait_times)\n\n                ##=========== preliminary step: switch the IP, opening the dashboard first\n                open_clash_dashboard(browser, wait, url_dashboard)\n                random_select_clash_ip(browser, wait)\n\n                ##=========== clear the cache (open the link below from the tab created above)\n                delete_cookie(browser)\n\n                ##============ log in to MetaMask\n                login_metamask(browser, wait, metamask_pw, metamask_home)\n                switch_tab_by_handle(browser, 1, 0)  # switch to the MetaMask tab\n\n                #======= switch the MetaMask account\n                print(f\"============== switching to account {i} ==============\")\n                fox_change_account(browser, wait, i)  # switch account, picking it from the list\n\n                ##=========== start the task\n                CID_text = DO_TXT(r\"../json_CID.txt\", i).read_x_line()\n                print(\"CID_text used this time:\", CID_text)\n                save_record = zksync_mint_NFT(browser, wait, CID_text)\n\n                print(\"record:\", save_record)\n                if \"成功\" in save_record:\n                    Do_Excel(excel_path,sheetname='SheetJS').plain_write(i, write_success_to_excel_column, \"成功\")\n                else:\n                    Do_Excel(excel_path, sheetname='SheetJS').plain_write(i, write_success_to_excel_column, \"×\")\n                ##=========== use a random wait time here\n                a = random.randint(10, 15)\n                time_sleep(a, f\"++++++++++ random wait of {a}\")\n                browser.quit()\n                a = random.randint(10, 15)\n                time_sleep(a, f\"++++++++++ random wait of {a}\")\n                #\n            except:\n                ##=========== save the failure to excel using the given worksheet\n                print(f\"---- account {i} failed, is the excel file still open?\")\n                Do_Excel(excel_path).plain_write(i, write_success_to_excel_column, \"×\")\n                time_sleep(6, \"error\")\n                browser.quit()\n                continue\n\n","repo_name":"alveraboquet/Block_chain","sub_path":"scripts_on_ubuntu/L2_project/ZK/sync_swap/zk_mint_NFT.py","file_name":"zk_mint_NFT.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"1038190005","text":"# -*- coding: utf-8 -*-\n\n\ndef aa(nums):\n    # builds the maximum binary tree (LeetCode 654); assumes LeetCode's TreeNode class\n    if not nums: return\n\n    v = max(nums)\n    idx = nums.index(v)\n    root = TreeNode(v)\n    l, r = nums[:idx], nums[idx+1:]\n    root.left = aa(l)\n    root.right = aa(r)\n    return root\n","repo_name":"yhxjack/algorithm","sub_path":"leetcode/654-最大二叉树.py","file_name":"654-最大二叉树.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18888400542","text":"from tkinter import *\ndef getvals():\n    print(f\"Your Name is :{namevalue.get()}\")\n    print(f\"Your PhoneNumber is :{phonevalue.get()}\")\n    print(f\"Your Gender is :{gendervalue.get()}\")\n    print(f\"Your Emergency Number is 
:{emergencyvalue.get()}\")\n print(f\"Your PaymentMode is :{paymentmodevalue.get()}\")\n print(f\"Your prebookingfood service value is :{foodservicevalue.get()}\")\n\n #first use \"w\" to create file and then use \"a\" to append the file\n with open(\"records.txt\",\"a\") as f:\n f.write(f\"{namevalue.get(),phonevalue.get(),gendervalue.get(),emergencyvalue.get(),paymentmodevalue.get(),foodservicevalue.get()}\")\n\nroot=Tk()\nroot.title(\"Travels\")\nroot.geometry(\"655x333\")\nLabel(root,text=\"Welcome to Ankit Travels\",font=\"comicsansms 9 bold\").grid(row=0,column=3)\nname=Label(root,text=\"Name\")\nphone=Label(root,text=\"Phone\")\ngender=Label(root,text=\"Gender\")\nemergency=Label(root,text=\"Emergency\")\npaymentmode=Label(root,text=\"PaymentMode\")\nname.grid(row=1 ,column=2)\nphone.grid(row=2 ,column=2)\ngender.grid(row=3 ,column=2)\nemergency.grid(row=4 ,column=2)\npaymentmode.grid(row=5 ,column=2)\n\n#Declaring variables\nnamevalue=StringVar()\nphonevalue=StringVar()\ngendervalue=StringVar()\nemergencyvalue=StringVar()\npaymentmodevalue=StringVar()\nfoodservicevalue=IntVar()\n\n#entries variables\nnameentry=Entry(root,textvariable=namevalue)\nphoneentry=Entry(root,textvariable=phonevalue)\ngenderentry=Entry(root,textvariable=gendervalue)\nemergencyentry=Entry(root,textvariable=emergencyvalue)\npaymentmodeentry=Entry(root,textvariable=paymentmodevalue)\n\nnameentry.grid(row= 1 , column= 3)\nphoneentry.grid(row= 2 , column= 3 )\ngenderentry.grid(row= 3 , column= 3 )\nemergencyentry.grid(row= 4 , column=3)\npaymentmodeentry.grid(row=5 , column=3)\n\n#CheckButton\nfoodservice=Checkbutton(root,text=\"Want to pre book your meals\", variable=foodservicevalue)\nfoodservice.grid(row=6 , column=3)\n\n\nButton(root,text=\"Submit to Ankit Travel\",command=getvals).grid(row=7,column=3)\n\nroot.mainloop()","repo_name":"Ankit-Developer143/Tkinter-GUI","sub_path":"CheckButton_EntryWidgets.py","file_name":"CheckButton_EntryWidgets.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12207591792","text":"import argparse\nimport logging\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n\ndef main(args=None):\n if not args:\n args = process_command_line()\n logger.debug(\"Arguments: %s\", args)\n\n try:\n report_code_coverage(args.bullseye, args.covfile)\n if args.html:\n generate_html(args.bullseye, args.covfile, args.html_path)\n except:\n logger.exception(\"Fatal error\")\n return -1\n\n return 0\n\n\ndef process_command_line():\n parser = argparse.ArgumentParser()\n setup_parser(parser)\n return parser.parse_args()\n\n\ndef setup_parser(root_parser):\n root_parser.add_argument(\n \"--bullseye\",\n type=Path,\n default=Path(\"/opt/BullseyeCoverage\"),\n help=\"path to Bullseye installation directory\",\n )\n root_parser.add_argument(\n \"--covfile\",\n type=Path,\n default=Path(\"../test.cov\"),\n help=\"path to Bullseye coverage file\",\n )\n root_parser.add_argument(\n \"--html\",\n action=\"store_true\",\n help=\"generate HTML report in addition to the regular one\",\n )\n root_parser.add_argument(\n \"--html-path\",\n type=Path,\n default=Path(\"../coverage_report\"),\n help=\"destination directory for HTML report\",\n )\n\n\ndef report_code_coverage(bullseye_path, covfile_path):\n bullseye_bin_path = bullseye_path / \"bin\"\n\n covsrc_path = bullseye_bin_path / \"covsrc\"\n cmd = 
f\"{covsrc_path} --file {covfile_path} --by-name --width 120\"\n logger.info(\"Bullseye command line: %s\", cmd)\n process = subprocess.run(\n cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n output = process.stdout\n logger.info(\"Bullseye output:\\n%s\", output)\n process.check_returncode()\n\n\ndef generate_html(bullseye_path, covfile_path, destination):\n bullseye_bin_path = bullseye_path / \"bin\"\n\n covhtml_path = bullseye_bin_path / \"covhtml\"\n cmd = f\"{covhtml_path} --file {covfile_path} {destination}\"\n logger.info(\"Bullseye command line: %s\", cmd)\n process = subprocess.run(\n cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n output = process.stdout\n logger.info(\"Bullseye output:\\n%s\", output)\n process.check_returncode()\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"intel/cassian","sub_path":"scripts/report_code_coverage.py","file_name":"report_code_coverage.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"12605109233","text":"\"\"\"\nDeclares CourseUsageInfo class to be used by the transform method in\nTransformers.\n\"\"\"\n\n\nfrom lms.djangoapps.courseware.access import _has_access_to_course\n\n\nclass CourseUsageInfo:\n '''\n A class object that encapsulates the course and user context to be\n used as currency across block structure transformers, by passing\n an instance of it in calls to BlockStructureTransformer.transform\n methods.\n '''\n def __init__(self, course_key, user, allow_start_dates_in_future=False, include_has_scheduled_content=False):\n # Course identifier (opaque_keys.edx.keys.CourseKey)\n self.course_key = course_key\n\n # User object (django.contrib.auth.models.User)\n self.user = user\n\n # Sometimes we want to allow blocks to be returned that can bypass the\n # StartDateTransformer's filter to show blocks with start dates in the future.\n # One use case of this is for the Dates page where we want to display\n # assignments that have not yet been released.\n self.allow_start_dates_in_future = allow_start_dates_in_future\n\n # This value is used within the StartDateTransformer in the case where we\n # would like to know whether there are future start dates, while still\n # filtering the blocks with start dates in the future.\n self.include_has_scheduled_content = include_has_scheduled_content\n\n # Cached value of whether the user has staff access (bool/None)\n self._has_staff_access = None\n\n @property\n def has_staff_access(self):\n '''\n Returns whether the user has staff access to the course\n associated with this CourseUsageInfo instance.\n\n For performance reasons (minimizing multiple SQL calls), the\n value is cached within this instance.\n '''\n if self._has_staff_access is None:\n self._has_staff_access = _has_access_to_course(self.user, 'staff', self.course_key)\n return self._has_staff_access\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/course_blocks/usage_info.py","file_name":"usage_info.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"72196170003","text":"from pyomo.environ import *\nfrom pyomo.dae import *\n\nBaseModel = AbstractModel()\n\nBaseModel.component_names = Set(doc=\"Names of the components that are in the mixture\")\nBaseModel.parameter_names = Set(doc=\"Names of the kinetic parameters of the 
reactive system\")\nBaseModel.fixed_parameter_names = Set(doc=\"Names of the kinetic parameters to fix\")\n\nBaseModel.init_conditions = Param(BaseModel.component_names,within=NonNegativeReals)\nBaseModel.fixed_parameters = Param(BaseModel.fixed_parameter_names)\nBaseModel.start_time = Param(within = NonNegativeReals, default = 0.0)\nBaseModel.end_time = Param(within = NonNegativeReals, default = 1.0)\n\n# Sets\nBaseModel.time = ContinuousSet(bounds=(BaseModel.start_time,BaseModel.end_time))\n\n# Variables\nBaseModel.C = Var(BaseModel.time,\n BaseModel.component_names,\n bounds=(0.0,None),\n initialize=1)\n\nBaseModel.dCdt = DerivativeVar(BaseModel.C,\n wrt=BaseModel.time)\n\nBaseModel.kinetic_parameter = Var(BaseModel.parameter_names,\n initialize=1)\n# Constraints\ndef rule_init_conditions(model,k):\n #st = model.start_time\n st = 0\n return model.C[st,k] == model.init_conditions[k]\n\nBaseModel.init_conditions_c = \\\n Constraint(BaseModel.component_names,rule=rule_init_conditions)\n\ndef rule_fixed_parameters(model,theta):\n return model.kinetic_parameter[theta] == model.fixed_parameters[theta]\nBaseModel.fix_parameters = Constraint(BaseModel.fixed_parameter_names,\n rule = rule_fixed_parameters)\n","repo_name":"tkrumpol/KIPET","sub_path":"kipet/library/BaseAbstractModel.py","file_name":"BaseAbstractModel.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2013414457","text":"from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\n\r\n\r\nclass SentimentAnalyzer:\r\n def __init__(self):\r\n self.analyzer = SentimentIntensityAnalyzer()\r\n\r\n def perform(self, input_value):\r\n sentiment_dict = self.analyzer.polarity_scores(input_value)\r\n result_dict = {\r\n 'pos': sentiment_dict['pos'] * 100,\r\n 'neg': sentiment_dict['neg'] * 100,\r\n 'neu': sentiment_dict['neu'] * 100\r\n }\r\n\r\n return result_dict","repo_name":"whitequbits/pipeliner","sub_path":"airflow/modules/sentiment_analyzer.py","file_name":"sentiment_analyzer.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"6745904560","text":"from django.conf import settings\nfrom django.http import Http404\nfrom sentry_sdk import capture_exception\n\nfrom knowledgebase.forms.submission import (\n ResourceSuggestionTypeForm,\n StorySubmissionTypeForm\n)\nfrom knowledgebase.models import CollectionRelationship\nfrom knowledgebase.tags import add_member_story_tag\nfrom content.models import MasterContent\nfrom myapa.models import ContactRole\nfrom myapa.viewmixins import AuthenticateMemberMixin\nfrom submissions.views import SubmissionEditFormView\nfrom content.mail import Mail\n\n\n\n\nclass SubmissionFormTypeView(AuthenticateMemberMixin, SubmissionEditFormView):\n home_url = '/knowledgebase/dashboard'\n success_url = '/knowledgebase/dashboard'\n body = ''\n\n def get_form_kwargs(self):\n kw = super().get_form_kwargs()\n kw['request'] = self.request\n return kw\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['body'] = self.body\n return context\n\n def set_content(self, request, *args, **kwargs):\n if not getattr(self, 'content', None):\n master_id = kwargs.pop('master_id', None)\n if master_id is not None:\n self.model_class = self.form_class.Meta.model\n self.content = self.model_class.objects.filter(\n master_id=master_id,\n publish_status='DRAFT'\n 
).first()\n                if not self.content:\n                    raise Http404('Submission Record not Found')\n            else:\n                self.content = None\n\n    def after_save(self, form):\n        collection_id = form['collection_choices'].value()\n        collection_obj = MasterContent.objects.get(pk=collection_id)\n        form_obj = form.save(commit=False)\n\n        self.remove_previous_collection_relationship(form_obj)\n\n        CollectionRelationship.objects.get_or_create(\n            content=form_obj,\n            content_master_related=collection_obj,\n            relationship='RELATED'\n        )\n\n        ContactRole.objects.get_or_create(\n            contact=self.request.user.contact,\n            content=self.content,\n            role_type='AUTHOR'\n        )\n\n        if self.user_is_submitting():\n            self.send_submitted_email(form_obj)\n\n        super().after_save(form)\n\n    def remove_previous_collection_relationship(self, form_obj):\n        collection_relationships = CollectionRelationship.objects.filter(\n            content=form_obj\n        )\n\n        for relationship in collection_relationships:\n            relationship.delete()\n\n    def user_is_submitting(self):\n        return self.request and 'submitButton' in self.request.POST\n\n    def send_submitted_email(self, form_obj):\n        try:\n            mail_context = {\n                'content': form_obj,\n                'SERVER_ADDRESS': settings.SERVER_ADDRESS\n            }\n            email_template = 'KNOWLEDGEBASE_SUBMISSION_SUBMITTED'\n            user_email = self.request.user.contact.email\n            Mail.send(email_template, user_email, mail_context)\n            Mail.send(\n                'KNOWLEDGEBASE_SUBMISSION_STAFF_NOTICE',\n                'knowledgebase@planning.org',\n                mail_context\n            )\n        except Exception as e:\n            capture_exception(e)\n\n\nclass StorySubmissionFormTypeView(SubmissionFormTypeView):\n    title = 'Submit a Story'\n    body = (\n        'Enter your story below. Please be sure '\n        'that it is correct before hitting \"Submit.\"'\n    )\n    form_class = StorySubmissionTypeForm\n    template_name = 'knowledgebase/newtheme/submission/story.html'\n\n    def after_save(self, form):\n        super().after_save(form)\n        form_obj = form.save(commit=False)\n        add_member_story_tag(form_obj)\n\n\nclass ResourceSuggestionFormTypeView(SubmissionFormTypeView):\n    title = 'Submit a Resource'\n    body = (\n        'Please enter the resource name and URL. 
'\n 'Please be sure that both are correct '\n 'before hitting \"Submit.\"'\n )\n form_class = ResourceSuggestionTypeForm\n template_name = 'knowledgebase/newtheme/submission/resource.html'\n","repo_name":"furmanczyk5/Django-Enterprise-App","sub_path":"knowledgebase/views/submission/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18582975177","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 22 08:10:25 2021\n\n@author: Administrator\n\"\"\"\n\n# Count the number of words in text file (using flatmap)\n# Difference between map and flatmap: map repeats and flatmaps no\n# Flatmap is trying to get the dataset in a sequence ( it flattens the elements to whatever you wish to)\n# Flatmap desintegrate the elements and produces an arbitrary number\n\nfrom pyspark import SparkConf, SparkContext\nimport collections\n\n# Configuration and set the Spark cluster and App Name\nconf = SparkConf().setMaster(\"local\").setAppName(\"Word Count\")\nsc= SparkContext(conf = conf)\n\nrddone = sc.textFile(\"file:///SparkCourse/book.txt\")\nwords = rddone.flatMap(lambda x: x.split()) # Split each line into words (transform)\nresult = words.countByValue() # action\n\nfor word, count in result.items():\n cleanword = word.encode('ascii', 'ignore')\n if(cleanword):\n print(cleanword.decode() + \" \" + str(count))\n\nprint(result)\n","repo_name":"aalva500-prog/BigData-Training","sub_path":"SparkCourse/RDD/pythonspark/wordcountRDD.py","file_name":"wordcountRDD.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10658364378","text":"from glob import glob\nimport numpy as np\nfrom sourcextractor.config import *\n\ntop = load_fits_images(\n sorted(glob(\"../../FIELD0/Double_Sersic/dsersic_0_vis.fits\")),\n psfs=sorted(glob(\"../../PSFs/psf_vis_os045_high_nu.psf\")),\n weights=sorted(glob(\"../../FIELD0/Double_Sersic/dsersic_0_vis.rms.fits\")),\n constant_background = 0.0,\n weight_absolute=1,\n weight_type='rms'\n)\n\n#top.split(ByKeyword('BAND'))\nmesgroup = MeasurementGroup(top)\nset_max_iterations(350)\nconstant_background = 0.0\nMAG_ZEROPOINT = 23.9\n\n## add the apertures\n#all_apertures = []\n#for img in mesgroup:\n# all_apertures.extend(add_aperture_photometry(img, [10, 30, 60] ) )\n# add_output_column('aper', all_apertures)\n\n# load and execute the general disk+bulge model\nexec(open(\"disk_p_sersic.py\").read())\n","repo_name":"Hbretonniere/Euclid_Morphology_Challenge","sub_path":"reproducibility/SourceXtractor++/sersic_pls_disk/dsersic_var_0.py","file_name":"dsersic_var_0.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"18231467498","text":"import numpy as np \nimport seaborn as sns\nimport matplotlib.pyplot as plt\ndef model_visualization(model,X,y,classifier):\n \"\"\"\n Takes results from training dataset & visualizes it using ListedColormap\n :param model: name of the model to print on top of visual.\n :param X: train or test x predictors\n :param y: train or test y label\n :return: It returns a plot. 
The image is saved in images folder.\n \"\"\"\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model)) \n\ndef data_plot(hue, data):\n \"\"\"\n Takes pandas dataframe, creates a countplot, print plots to image folder\n :param data: pandas dataframe column(s) to be visualized\n :param hue: label for prediction from pandas dataframe (extracted as an array)\n :return: It returns a plot. Expected to read by pandas dataframe. The image is save in images folder.\n \"\"\"\n for i, col in enumerate(data.columns):\n plt.figure(i)\n sns.set(rc={'figure.figsize':(7, 3)})\n sns.countplot(x=data[col],palette='husl',hue=hue,data=data)\n plt.savefig('images/{0}.png'.format(col)) \n\nfrom sklearn.model_selection import cross_val_predict, cross_val_score\nfrom sklearn.metrics import confusion_matrix,classification_report,accuracy_score\n\ndef print_score(classifier,X_test,y_test):\n \"\"\"\n Takes in classifier, x & y test variables and print the model's accuracy\n classification report, and confusion matrix\n :param classifier: classifier that the model has been sustantiated\n :param X_test: test predictors\n :param y_test: test labels\n :return: printed output of accuracy score, classification report, and \n confusion matrix.\n \"\"\"\n print(\"Test results:\\n\")\n print('Accuracy Score: {0:.4f}\\n'.format(accuracy_score(y_test,classifier.predict(X_test))))\n print('Classification Report:\\n{}\\n'.format(classification_report(y_test,classifier.predict(X_test))))\n print('Confusion Matrix:\\n{}\\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))","repo_name":"Lwhieldon/IstheMushroomPoisonous","sub_path":"notebooks+data+images/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39502244380","text":"import numpy as np\nimport pandas as pd\n\ndef get_ANNEXA4_comparable_cohort(df, feature_dictionary, verbose = True):\n \"\"\"\n Get the data of the patients who match the inclusion criteria from the\n ANNEXA-4 study. 
Removes all patients with initial ICH volumes larger\n    than 60cc, low GCS, time between LKW and first head CT over 18 hours,\n    and early WLST.\n\n    Parameters\n    ----------\n    df : pandas.DataFrame\n        Dataframe comprising the entire data\n    feature_dictionary : dict\n        Dictionary where keys are the full names of the feature and the values\n        are the feature names saved in the csv file\n    verbose : Boolean, optional\n        Prints the number of patients excluded by each inclusion criterion\n\n    Returns\n    -------\n    pandas.DataFrame\n        Data of patients who match the inclusion criteria from the ANNEXA-4\n        study\n    \"\"\"\n    b = len(df)\n    included = df[df[feature_dictionary[\"Initial ICH Volume\"]] <= 60].copy()\n    c = len(included)\n    included = included[included[feature_dictionary[\"Initial GCS Score: 3-4\"]] == 0].copy()\n    d = len(included)\n    included = included[included[feature_dictionary[\"Hours from LKW to hospital arrival\"]] < 18].copy()\n    e = len(included)\n    included = included[included[feature_dictionary[\"CMO/WLST at admission\"]] == 0].copy()\n    f = len(included)\n\n    if verbose:\n        print(\"Generating the ANNEXA-4-comparable Cohort...\")\n        print(\"\\tInitial ICH Volume >60cc:\", b - c, \"removed\")\n        print(\"\\tInitial GCS 3-4:\", c - d, \"removed\")\n        print(\"\\tTime between LKW and first head CT >18 hrs:\", d - e, \"removed\")\n        print(\"\\tCMO/WLST status at admission:\", e - f, \"removed\")\n        print(\"\\tLength before Exclusion:\", len(df))\n        print(\"\\tLength after Exclusion:\", len(included))\n        print()\n    \n    included.reset_index(inplace = True, drop = True)\n    return included\n\ndef get_ANNEXA4_ineligible_cohort(df, feature_dictionary):\n    \"\"\"\n    Get the data of the patients who do not match the inclusion criteria\n    from the ANNEXA-4 study.\n\n    Parameters\n    ----------\n    df : pandas.DataFrame\n        Dataframe comprising the entire data\n    feature_dictionary : dict\n        Dictionary where keys are the full names of the feature and the values\n        are the feature names saved in the csv file\n\n    Returns\n    -------\n    pandas.DataFrame\n        Data of patients who do not match the inclusion criteria from the\n        ANNEXA-4 study\n    \"\"\"\n    included_study_id = get_ANNEXA4_comparable_cohort(df, feature_dictionary, verbose = False)[feature_dictionary[\"Study ID\"]]\n    \n    study_ids = df[feature_dictionary[\"Study ID\"]]\n    excluded = df[np.invert(study_ids.isin(included_study_id))].copy()\n    \n    excluded.reset_index(inplace = True, drop = True)\n    return excluded\n\ndef get_FXai_df(df, feature_dictionary):\n    \"\"\"\n    Get the data of the patients who are on Factor Xa inhibitors\n\n    Parameters\n    ----------\n    df : pandas.DataFrame\n        Dataframe comprising the entire data\n    feature_dictionary : dict\n        Dictionary where keys are the full names of the feature and the values\n        are the feature names saved in the csv file\n\n    Returns\n    -------\n    pandas.DataFrame\n        Data of patients who are on Factor Xa inhibitors\n    \"\"\"\n    x = df.loc[np.where(df[feature_dictionary[\"FXa inhibitor use\"]] == 1, True, False)].copy()\n    x.reset_index(inplace = True)\n    return x\n\ndef get_higher_likelihood_of_favorable_outcome_in_ANNEXA4_cohort(df, feature_dictionary, verbose = True):\n    \"\"\"\n    Get the data of the ANNEXA-4-comparable patients who have a higher\n    likelihood of favorable functional outcome. First, the ANNEXA-4-comparable\n    cohort is generated. 
Then patients who were WLST status at any time of\n hospital stay, were discharged to hospice, dead at discharge (mRS = 6),\n or had initial GCS scores not between 13-15 are removed.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Dataframe comprising the entire data\n feature_dictionary : dict\n Dictionary where keys are the full names of the feature and the values\n are the feature names saved in the csv file\n\n Returns\n -------\n pandas.DataFrame\n Data of of the ANNEXA-4-comparable patients who have a higher\n likelihood of favorable function outcome\n \"\"\"\n included_df = get_ANNEXA4_comparable_cohort(df, feature_dictionary, verbose = False)\n\n a = len(included_df)\n included = included_df[included_df[feature_dictionary[\"CMO/WLST at any time of hospital stay\"]] == 0].copy()\n b = len(included)\n included = included[included[feature_dictionary[\"Discharged to Hospice\"]] == 0].copy()\n c = len(included)\n included = included[included[feature_dictionary[\"mRS at Discharge\"]] != 6].copy()\n d = len(included)\n included = included[included[feature_dictionary[\"Initial GCS Score: 13-15\"]] == 1].copy()\n e = len(included)\n\n if verbose:\n print(\"Getting patients with a higher likelihood of a favorable functional outcome\")\n print(\"among the ANNEXA-4 comparable cohort...\")\n print(\"\\tCMO/WLST status at any time in hospital stay:\", a-b, \"removed\")\n print(\"\\tDischarged to Hospice:\", b - c, \"removed\")\n print(\"\\tDead at Discharge (mRS = 6):\", c -d, \"removed\")\n print(\"\\tInitial GCS Score not 13-15:\", d-e, \"removed\")\n print()\n \n included.reset_index(inplace = True, drop = True)\n return included\n\ndef get_high_ICH_volume_low_GCS_cohort(df, feature_dictionary):\n \"\"\"\n Get subset of patients who are ANNEXA-4 ineligible because of high initial\n ICH volume and low initial GCS score\n\n Parameters\n ----------\n df : pandas.DataFrame\n Dataframe comprising the entire data\n feature_dictionary : dict\n Dictionary where keys are the full names of the feature and the values\n are the feature names saved in the csv file\n\n Returns\n -------\n pandas.DataFrame\n Data of of the patients with initial ICH volumes > 60cc and/or initial \n GCS score 3-4\n \"\"\"\n inclusion_criteria1 = df[feature_dictionary[\"Initial ICH Volume\"]] >= 60\n inclusion_criteria2 = df[feature_dictionary[\"Initial GCS Score: 3-4\"]] == 1\n either_or_num = (inclusion_criteria1*1) + (inclusion_criteria2*1)\n either_or_true = either_or_num > 0\n subset = df[either_or_true].copy()\n\n return subset","repo_name":"naddan27/Andexanet-Alfa-Simulation","sub_path":"subsets.py","file_name":"subsets.py","file_ext":"py","file_size_in_byte":6405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41210558471","text":"\"\"\"\nDate: 2018/10/22\n\"\"\"\n\n\nclass Solution(object):\n def countSegments(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n flag = False\n count = 0\n for c in s:\n if c == ' ' and flag:\n count += 1\n flag = False\n if c != ' ' and not flag:\n flag = True\n if s[-1] != ' ':\n count += 1\n return count\n\nprint(Solution().countSegments(' 3'))","repo_name":"maples1993/LeetCode","sub_path":"q434.py","file_name":"q434.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20491164499","text":"#!/usr/bin/env python\n\nimport rospy\nimport math\nfrom sensor_msgs.msg import LaserScan\nfrom 
std_msgs.msg import Float64\nfrom vesc_msgs.msg import VescStateStamped \n\nglobal thresh\nglobal turn_div\nglobal bub\nthresh = 2.75  # edit this number for the threshold distance\nturn_div = 120.0  # edit this number for the servo command divisor (a larger number means milder turns)\nbub = 26  # edit this number for the forbidden bubble (a larger number will mandate a larger gap)\n\ndef bubble(data):\n    global counter\n    global direc\n    global prev\n    if(counter == 0):  # find where the minimum distance is located\n        data = list(data.ranges)\n        cone = data[-45:]+data[0:45]\n        mincone = min(cone)\n        Min=(cone.index(mincone))\n    if(counter == 0):\n        count = 0\n        direc = float(\"inf\")  # divide the data into two chunks\n        right = cone[0:Min]\n        left = cone[Min+1:]\n        for i in range(len(right)):  # find a gap using a for loop\n            tar = right[i]\n            if(tar>thresh):\n                count+=1\n            if(tar<=thresh):\n                count =0\n            if(count == bub):\n                direc = i-bub/2\n                count = 0\n        count = 0\n        for i in range(len(left)):\n            tar = left[i]\n            if(tar>thresh):\n                count+=1\n            if(tar<=thresh):\n                count =0\n            if(count == bub):\n                direc = i-bub/2+Min+1\n                count = 0\n        if(direc != float(\"inf\")):  # if no gap is found, use the previous servo command\n            prev = direc\n        pub.publish((direc-45)/turn_div+0.5)  # new servo command, proportional to the angle of the gap center\n        pub2.publish(0.09)  # constant duty cycle\n        counter =0\n\n\ndef servodetect():\n    global counter\n    global pub\n    global pub2\n    counter = 0\n    rospy.init_node(\"servodetect\",anonymous = True)\n    rospy.Subscriber(\"/scan\", LaserScan, bubble)\n    pub = rospy.Publisher(\"/commands/servo/position\",Float64,queue_size=10)\n    pub2 = rospy.Publisher(\"commands/motor/duty_cycle\",Float64,queue_size=10)\n    rospy.spin()\n\nif __name__ == '__main__':\n    servodetect()\n\n","repo_name":"JimingYan/ProjectRepository","sub_path":"Node_Executables/servodetect.py","file_name":"servodetect.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1976757827","text":"# coding test practice / practice problems / maze escape\n# failed attempt - newmaps size = (n+2) * (m+2)\n\nfrom collections import deque\n\ndirection = [[1,0],[-1,0],[0,1],[0,-1]]\n\ndef bfs(i, j, final_i, final_j, newmaps):\n    visited = set()\n    q = deque([(i,j,0)])\n    \n    while q:\n        r, c, cnt = q.popleft()\n        if (r,c) in visited:\n            continue\n        visited.add((r, c))\n        if (r,c) == (final_i,final_j):\n            return cnt\n        \n        for x,y in direction:\n            if newmaps[r+x][c+y] != 'X':\n                q.append((r+x,c+y,cnt+1))\n    \n    return -1\n    \n    \n    \ndef solution(maps):\n    newmaps = [['X'] * (len(maps[0])+2) for _ in range(len(maps)+2)]\n    for i in range(len(maps)):\n        for j in range(len(maps[0])):\n            newmaps[i+1][j+1] = maps[i][j]\n    \n    S = (0,0)\n    E = (0,0)\n    L = (0,0)\n    for i in range(len(newmaps)):\n        for j in range(len(newmaps[0])):\n            if newmaps[i][j] == 'S':\n                S = (i, j)\n            if newmaps[i][j] == 'E':\n                E = (i, j)\n            if newmaps[i][j] == 'L':\n                L = (i, j)\n    \n    stol = bfs(S[0], S[1], L[0], L[1], newmaps)\n    ltoe = bfs(L[0], L[1], E[0], E[1], newmaps)\n    if stol == -1 or ltoe == -1:\n        return -1\n    \n    return stol + ltoe","repo_name":"whitem4rk/2023-algorithm-study","sub_path":"LEVEL 2/miroexit.py","file_name":"miroexit.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37726691212","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nmovies =[]\r\n\r\ndef remove_space(sentence):\r\n    sentence = ''.join(sentence.split())\r\n    return 
sentence\r\n\r\n\r\ndef get_movie(url):\r\n page = requests.get(url)\r\n soup = BeautifulSoup(page.content, 'html.parser')\r\n for item in soup.find_all('div', class_= 'lister-item-content'):\r\n name = (item.find('a').get_text())\r\n year = (item.find('span', class_='lister-item-year').get_text())\r\n rate = (item.find('span', class_='ipl-rating-star__rating').get_text())\r\n try:\r\n metascore = (item.find('span', class_='metascore').get_text())\r\n except:\r\n metascore = \"none\"\r\n\r\n #somehow cant get the certificate span tag when requesting page !!!\r\n #cert = (item.find('span', class_='certificate').get_text())\r\n runtime = (item.find('span', class_='runtime').get_text())\r\n genre = (item.find('span', class_='genre').get_text())\r\n genre = genre.split(\", \")\r\n\r\n genre1 = remove_space(genre[0])\r\n\r\n try:\r\n genre2 = remove_space(genre[1])\r\n except:\r\n genre2=\"none\"\r\n try:\r\n genre3 = remove_space(genre[2])\r\n except:\r\n genre3=\"none\"\r\n\r\n\r\n for i in item.find_all('p', class_='text-muted'):\r\n if(i.find_all('a') != []):\r\n director = i.find_all('a')[0].get_text()\r\n star1 = i.find_all('a')[1].get_text()\r\n star2 = i.find_all('a')[2].get_text()\r\n star3 = i.find_all('a')[3].get_text()\r\n\r\n\r\n\r\n movies.append([name, year, rate, metascore, director, star1, star2, star3, runtime, genre1, genre2, genre3])\r\n\r\nurls = [\r\n \"https://www.imdb.com/list/ls050782187/\",\r\n \"https://www.imdb.com/list/ls050782187/?sort=list_order,asc&st_dt=&mode=detail&page=2\",\r\n \"https://www.imdb.com/list/ls050782187/?sort=list_order,asc&st_dt=&mode=detail&page=3\",\r\n \"https://www.imdb.com/list/ls050782187/?sort=list_order,asc&st_dt=&mode=detail&page=4\",\r\n \"https://www.imdb.com/list/ls050782187/?sort=list_order,asc&st_dt=&mode=detail&page=5\",\r\n]\r\nfor url in urls:\r\n get_movie(url)\r\n\r\n#print(movies)\r\npd.DataFrame(movies).to_excel('output.xlsx', header=False, index=False)","repo_name":"masoudqashqai/imdb500","sub_path":"imdb500.py","file_name":"imdb500.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6087898446","text":"import bpy\nimport bmesh\nimport math\n\nbasis_leg_name = 'basis_leg'\nbasis_leg_with_feets_name = 'basis_leg_with_feets'\nbasis_mainboard_leg_name = 'basis_mainboard_leg'\n\ndef create_mesh(\n e, t, h, w, x, z,\n teeth_width, teeth_height, teeth_thickness,\n side_teeth_width, side_teeth_height, side_teeth_thickness, side_teeth_z,\n with_foot_connexion,\n power_on_p, power_on_ri, power_on_re, power_on_z, power_on_height\n):\n mesh = bpy.data.meshes.new(\n basis_leg_name\n + '_' + str((\n e, t, h, w, x, z,\n teeth_width, teeth_height, teeth_thickness,\n side_teeth_width, side_teeth_height, side_teeth_thickness, side_teeth_z,\n with_foot_connexion,\n power_on_p, power_on_ri, power_on_re, power_on_z, power_on_height\n ))\n )\n\n hw = 0.5 * w\n ht = 0.5 * t\n htt = 0.5 * teeth_thickness\n htw = 0.5 * teeth_width\n hstt = 0.5 * side_teeth_thickness\n hstw = 0.5 * side_teeth_width\n\n sth = z - e - h + teeth_height + side_teeth_z # total side teeth z\n\n vertices = [\n (x - ht, hw, z - e),\n (x - ht, -hw, z - e),\n (x + ht, hw, z - e),\n (x + ht, -hw, z - e),\n\n (x - htt - e, htw + e, z - e),\n (x - htt - e, -htw - e, z - e),\n (x + htt + e, htw + e, z - e),\n (x + htt + e, -htw - e, z - e),\n\n # 8\n (x - ht, hw, z - e),\n (x - ht, -hw, z - e),\n (x + ht, hw, z - e),\n (x + ht, -hw, z - e),\n\n (x - htt - e, htw + e, z - e + 
teeth_height),\n (x - htt - e, -htw - e, z - e + teeth_height),\n (x + htt + e, htw + e, z - e + teeth_height),\n (x + htt + e, -htw - e, z - e + teeth_height),\n\n # 16\n (x - ht, hw, z - e - h + teeth_height),\n (x - ht, -hw, z - e - h + teeth_height),\n (x + ht, hw, z - e - h + teeth_height),\n (x + ht, -hw, z - e - h + teeth_height),\n\n (x - htt, htw, z - e - h + teeth_height),\n (x - htt, -htw, z - e - h + teeth_height),\n (x + htt, htw, z - e - h + teeth_height),\n (x + htt, -htw, z - e - h + teeth_height),\n\n # 24\n (x - htt, htw, z - e - h),\n (x - htt, -htw, z - e - h),\n (x + htt, htw, z - e - h),\n (x + htt, -htw, z - e - h),\n ]\n\n edges = [\n (0, 1), (1, 3), (3, 2), (2, 0),\n (4, 5), (5, 7), (7, 6), (6, 4),\n (8, 9), (9, 11), (11, 10), (10, 8),\n (12, 13), (13, 15), (15, 14), (14, 12),\n (4, 12),(5, 13), (6, 14), (7, 15),\n (0, 8), (1, 9), (2, 10), (3, 11),\n (16, 17), (17, 19), (19, 18), (18, 16),\n (8, 16), (9, 17), (10, 18), (11, 19),\n (20, 21), (21, 23), (23, 22), (22, 20),\n (24, 25), (25, 27), (27, 26), (26, 24),\n (20, 24), (21, 25), (22, 26), (23, 27),\n ]\n faces = [\n (0, 1, 5, 4),\n (1, 3, 7, 5),\n (3, 2, 6, 7),\n (2, 0, 4, 6),\n\n (12, 13, 15, 14),\n\n (12, 4, 5, 13),\n (6, 14, 15, 7),\n (4, 12, 14, 6),\n (7, 15, 13, 5),\n\n (1, 0, 8, 9),\n (3, 1, 9, 11),\n (2, 3, 11, 10),\n (0, 2, 10, 8),\n\n # (8, 16, 17, 9),\n # (18, 10, 11, 19),\n\n (16, 20, 21, 17),\n (22, 18, 19, 23),\n (20, 16, 18, 22),\n (17, 21, 23, 19),\n\n (20, 24, 25, 21),\n (26, 22, 23, 27),\n (24, 20, 22, 26),\n (21, 25, 27, 23),\n\n (25, 24, 26, 27),\n ]\n\n if with_foot_connexion:\n vertices.extend([\n #28\n (x - hstt, hw, sth + hstw),\n (x - hstt, -hw, sth + hstw),\n (x + hstt, hw, sth + hstw),\n (x + hstt, -hw, sth + hstw),\n\n # 32\n (x - hstt, hw, sth - hstw),\n (x - hstt, -hw, sth - hstw),\n (x + hstt, hw, sth - hstw),\n (x + hstt, -hw, sth - hstw),\n\n #36\n (x - hstt, hw - side_teeth_height, sth + hstw),\n (x - hstt, -hw + side_teeth_height, sth + hstw),\n (x + hstt, hw - side_teeth_height, sth + hstw),\n (x + hstt, -hw + side_teeth_height, sth + hstw),\n\n #40\n (x - hstt, hw - side_teeth_height, sth - hstw),\n (x - hstt, -hw + side_teeth_height, sth - hstw),\n (x + hstt, hw - side_teeth_height, sth - hstw),\n (x + hstt, -hw + side_teeth_height, sth - hstw),\n ])\n\n edges.extend([\n (28, 30), (30, 34), (34, 32), (32, 28),\n\n (29, 31), (31, 35), (35, 33), (33, 29),\n\n (36, 38), (38, 42), (42, 40), (40, 36),\n (28, 36), (30, 38), (32, 40), (34, 42),\n\n (37, 39), (39, 43), (43, 41), (41, 37),\n (29, 37), (31, 39), (33, 41), (35, 43),\n ])\n\n faces.extend([\n (8, 10, 30, 28),\n (32, 34, 18, 16),\n (8, 28, 32, 16),\n (30, 10, 18, 34),\n\n (11, 9, 29, 31),\n (35, 33, 17, 19),\n (29, 9, 17, 33),\n (11, 31, 35, 19),\n\n (36, 38, 42, 40),\n (38, 36, 28, 30),\n (40, 42, 34, 32),\n (36, 40, 32, 28),\n (42, 38, 30, 34),\n\n (39, 37, 41, 43),\n (37, 39, 31, 29),\n (43, 41, 33, 35),\n (41, 37, 29, 33),\n (39, 43, 35, 31),\n ])\n else:\n vertices.extend([\n #28\n (x - ht, hw, sth + side_teeth_z),\n (x - ht, -hw, sth + side_teeth_z),\n (x + ht, hw, sth + side_teeth_z),\n (x + ht, -hw, sth + side_teeth_z),\n ])\n\n faces.extend([\n (28, 8, 10, 30),\n (9, 29, 31, 11),\n ])\n\n nbverts_power_on = len(vertices)\n vertices.extend([\n (x + ht, power_on_ri, z - e - power_on_z + power_on_ri),\n (x + ht, -power_on_ri, z - e - power_on_z + power_on_ri),\n (x + ht, -power_on_ri, z - e - power_on_z - power_on_ri),\n (x + ht, power_on_ri, z - e - power_on_z - power_on_ri),\n\n (x - ht, power_on_re, z - e - 
power_on_z + power_on_re),\n (x - ht, -power_on_re, z - e - power_on_z + power_on_re),\n (x - ht, -power_on_re, z - e - power_on_z - power_on_re),\n (x - ht, power_on_re, z - e - power_on_z - power_on_re),\n ])\n\n faces.extend([\n (nbverts_power_on, 10, 11, nbverts_power_on + 1),\n (nbverts_power_on + 1, 11, 19, nbverts_power_on + 2),\n (nbverts_power_on + 2, 19, 18, nbverts_power_on + 3),\n (nbverts_power_on + 3, 18, 10, nbverts_power_on),\n\n (8, nbverts_power_on + 4, nbverts_power_on + 5, 9),\n (9, nbverts_power_on + 5, nbverts_power_on + 6, 17),\n (17, nbverts_power_on + 6, nbverts_power_on + 7, 16),\n (16, nbverts_power_on + 7, nbverts_power_on + 4, 8),\n ])\n\n nbverts_power_on2 = len(vertices)\n i1 = None\n i2 = None\n i3 = None\n for i in range(0, power_on_p + 1):\n alpha = i * (2 * math.pi) / power_on_p\n ca = math.cos(alpha)\n sa = math.sin(alpha)\n ci = power_on_ri * ca\n si = power_on_ri * sa\n ce = power_on_re * ca\n se = power_on_re * sa\n\n verts = [\n (x + ht, ci, z - e - power_on_z - si),\n (x + ht - power_on_height, ci, z - e - power_on_z - si),\n (x + ht - power_on_height, ce, z - e - power_on_z - se),\n (x - ht, ce, z - e - power_on_z - se),\n ]\n vertices.extend(verts)\n nbidx = len(verts)\n\n bi = nbverts_power_on2 + i * nbidx\n mi = bi + 1\n me = bi + 2\n te = bi + 3\n\n edges.extend([\n (bi, mi),\n (me, te),\n (mi, me),\n ])\n\n if i > 0:\n faces.extend([\n (bi, bi - nbidx, mi - nbidx, mi),\n (me, me - nbidx, te - nbidx, te),\n (mi, mi - nbidx, me - nbidx, me),\n ])\n\n if alpha > 1.5 * math.pi:\n if i3 is None:\n i3 = bi\n\n faces.extend([\n (bi - nbidx, bi, nbverts_power_on + 0),\n (te, te - nbidx, nbverts_power_on + 4),\n ])\n elif alpha > math.pi:\n if i2 is None:\n i2 = bi\n\n faces.extend([\n (bi - nbidx, bi, nbverts_power_on + 1),\n (te, te - nbidx, nbverts_power_on + 5),\n ])\n elif alpha > 0.5 * math.pi:\n if i1 is None:\n i1 = bi\n\n faces.extend([\n (bi - nbidx, bi, nbverts_power_on + 2),\n (te, te - nbidx, nbverts_power_on + 6),\n ])\n else:\n faces.extend([\n (bi - nbidx, bi, nbverts_power_on + 3),\n (te, te - nbidx, nbverts_power_on + 7),\n ])\n\n mesh.from_pydata(vertices, edges, faces)\n mesh.update()\n\n return mesh\n\ndef mainboard_leg_mesh(basis_leg_mesh, mainboard_mesh):\n bm = bmesh.new()\n\n bm.from_mesh(basis_leg_mesh)\n bm.from_mesh(mainboard_mesh)\n\n mesh = bpy.data.meshes.new(\n basis_mainboard_leg_name\n )\n\n bm.to_mesh(mesh)\n bm.free()\n\n return mesh\n\ndef leg_with_feets_mesh(leg_mesh, left_foot_mesh, right_foot_mesh):\n bm = bmesh.new()\n\n bm.from_mesh(leg_mesh)\n bm.from_mesh(left_foot_mesh)\n bm.from_mesh(right_foot_mesh)\n\n mesh = bpy.data.meshes.new(\n basis_leg_with_feets_name\n )\n\n bm.to_mesh(mesh)\n bm.free()\n\n return mesh","repo_name":"DethCount/hexcope","sub_path":"meshes/basis_leg.py","file_name":"basis_leg.py","file_ext":"py","file_size_in_byte":9431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41354551477","text":"from common.register import base\nimport consul\nimport requests\n\nclass ConsulRegister(base.Register):\n def __init__(self,host,port):\n self.host = host\n self.port = port\n self.c = consul.Consul(host=host,port=port)\n\n def register(self,name,id,address,port,tags,check)->bool:\n if check is None:\n check = {\n \"GRPC\": f\"{address}:{port}\",\n \"GRPCUseTLS\":False,\n \"Timeout\":\"5s\",\n \"Interval\":\"5s\",\n \"DeregisterCriticalServiceAfter\":\"15s\"\n }\n else:\n check = check\n return 
self.c.agent.service.register(name=name,service_id=id ,\n address=address,port=port,tags=tags,check=check)\n\n def deregister(self,service_id):\n rsp = self.c.agent.service.deregister(service_id)\n return rsp\n\n def get_all_service(self):\n return self.c.agent.service()\n\n def filter_service(self,filter):\n url = f\"http://{self.host}:{self.port}/v1/agent/services\"\n params = {\n \"filter\":filter\n }\n return requests.get(url,params=params).json()","repo_name":"mydre/micro_srvs_python","sub_path":"common/register/consul.py","file_name":"consul.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16215229107","text":"from itertools import cycle\n\nnotes = [\"c\", \"c#\", \"d\", \"d#\", \"e\", \"f\", \"f#\", \"g\", \"g#\", \"a\", \"a#\", \"b\"]\n\n# from collections import sorte\n\nstart_midi = 12\n\npiano_notes = []\n\nscale_interval = {\"major\": [2, 2, 1, 2, 2, 2], \"minor\": [2, 1, 2, 2, 1, 2]}\n\n\nclass Piano:\n @staticmethod\n def get_notes(piano_start=21, piano_end=108):\n piano_notes = []\n piano_range = range(piano_start, piano_end)\n for i in range(9):\n for n, note in enumerate(notes):\n pitch = start_midi + (12 * i + n)\n note_name = f\"{note}{i}\"\n if pitch in piano_range:\n piano_notes.append(\n dict(note_name=note_name, pitch=pitch, note=note)\n )\n return piano_notes\n\n @staticmethod\n def get_scale(key=\"c\", scale=\"major\"):\n piano_notes = Piano.get_notes()\n piano_scale_notes = []\n note_cycle = cycle(notes)\n scale_notes = []\n for n in notes:\n note = next(note_cycle)\n if note == key:\n scale_notes.append(note)\n intervals = scale_interval.get(scale)\n for interval in intervals:\n for i in range(interval):\n note = next(note_cycle)\n scale_notes.append(note)\n for note in piano_notes:\n if note.get(\"note\") in scale_notes:\n piano_scale_notes.append(note)\n return piano_scale_notes\n","repo_name":"ziwoz/coded_music","sub_path":"notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7149423392","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n# from selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\ndef radio_button():\n\n driver = webdriver.Chrome()\n\n driver.implicitly_wait(10)\n\n url = \"https://fs2.formsite.com/meherpavan/form2/index.html?1537702596407\"\n driver.get(url)\n\n # status = driver.find_element(By.ID, \"RESULT_RadioButton-7_0\").is_selected()\n # print(status)\n\n # driver.find_element(By.XPATH, \"//*[@id='q26']/table/tbody/tr[1]/td/label\").click()\n #\n # time.sleep(5)\n # status1 = driver.find_element(By.XPATH, \"//*[@id='q26']/table/tbody/tr[1]/td/label\").is_selected()\n # print(status1)\n\n # driver.find_element(By.XPATH, \"//*[@id='q26']/table/tbody/tr[2]/td/label\").click()\n # status2 = driver.find_element(By.XPATH, \"//*[@id='q26']/table/tbody/tr[2]/td/label\").is_selected()\n # print(status2)\n\n elementCheckbox = \"//*[@id='q15']/table/tbody/tr[1]/td/label\"\n clickCheckbox = driver.find_element(By.XPATH, elementCheckbox)\n clickCheckbox.click()\n\n status = clickCheckbox.is_selected()\n print(status)\n \n\n time.sleep(3)\n driver.close()\n\nif __name__==\"__main__\":\n 
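# run the radio-button demo only when this file is executed directly as a script\r\n    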
radio_button()","repo_name":"baothais/pythonProject","sub_path":"selenium/radio_button.py","file_name":"radio_button.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17909853957","text":"import asyncio\nimport logging\nimport pytest\nimport sys\nimport qrm_server.qrm_http_server\nimport qrm_defs.qrm_urls\nimport json\nimport os\nfrom aiohttp import web\nfrom pathlib import Path\nfrom db_adapters import redis_adapter\nfrom pytest_redis import factories\nfrom qrm_server import management_server\nfrom qrm_server import qrm_http_server\nfrom qrm_defs.resource_definition import Resource, ACTIVE_STATUS\nfrom qrm_server.q_manager import QueueManagerBackEnd, QrmIfc, \\\n ResourcesRequest, ResourcesRequestResponse\nfrom pytest_httpserver import HTTPServer\nfrom qrm_client.qrm_http_client import QrmClient, ManagementClient\nfrom werkzeug.wrappers import Request, Response\nfrom multiprocessing import Process\n\nTEST_TOKEN = 'token1234'\nREDIS_PORT = 6379\n\nhere = Path(__file__).resolve().parent.parent\nsys.path.append(f'{here}')\nlogging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] [%(levelname)s] [%(module)s] [%(message)s]')\nredis_my_proc = factories.redis_proc(port=REDIS_PORT)\nredis_my = factories.redisdb('redis_my_proc')\nwait_for_test_call_times = 0\n\n\ndef test_if_redis_server_is_up():\n stat = os.system('service redis-server status')\n if not stat:\n logging.error(\n f'\"service redis-server status\" return is = {stat}, test cant run with redis, run: \"service redis-server stop\"')\n try:\n stat = os.system('service redis-server stop')\n except Exception as e:\n logging.error(e)\n\n\ntest_if_redis_server_is_up()\n\n\ndef json_to_dict(json_str: str or dict) -> dict:\n if isinstance(json_str, str):\n return json.loads(json_str)\n else:\n return json_str\n\n\n# noinspection PyMethodMayBeStatic\nclass QueueManagerBackEndMock(QrmIfc):\n for_test_is_request_active: bool = False\n get_filled_request_obj: ResourcesRequestResponse = ResourcesRequestResponse()\n\n async def cancel_request(self, token: str) -> None:\n print('####### using cancel_request in QueueManagerBackEndMock ####### ')\n return\n\n async def new_request(self, resources_request: ResourcesRequest) -> ResourcesRequestResponse:\n resources_request_res = ResourcesRequestResponse()\n resources_request_res.token = resources_request.token\n return resources_request_res\n\n async def is_request_active(self, token: str) -> bool:\n return self.for_test_is_request_active\n\n async def get_new_token(self, token: str) -> str:\n return f'{token}_new'\n\n async def get_resource_req_resp(self, token: str) -> ResourcesRequestResponse:\n return self.get_filled_request_obj\n\n async def init_backend(self) -> None:\n pass\n\n async def stop_backend(self) -> None:\n pass\n\n\n@pytest.fixture\ndef event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()\n\n\n@pytest.fixture(scope='session')\ndef default_test_token() -> str:\n return TEST_TOKEN\n\n\n@pytest.fixture(scope='function')\ndef qrm_server_mock_for_client(httpserver: HTTPServer, default_test_token: str) -> HTTPServer:\n # noinspection PyShadowingNames\n def new_request_handler(request: Request):\n req_json = request.json\n req_json = json_to_dict(req_json)\n rrr_obj = ResourcesRequestResponse(token=req_json['token'])\n rrr_json = rrr_obj.to_json()\n res = Response(rrr_json, status=200, content_type=\"application/json\")\n return res\n\n rrr_obj = 
ResourcesRequestResponse()\n rrr_obj.token = default_test_token\n rrr_json = rrr_obj.to_json()\n httpserver.expect_request(f'{qrm_defs.qrm_urls.URL_GET_ROOT}').respond_with_data(\"1\")\n httpserver.expect_request(\n f'{qrm_defs.qrm_urls.URL_POST_CANCEL_TOKEN}').respond_with_data(qrm_http_server.canceled_token_msg(TEST_TOKEN))\n httpserver.expect_request(qrm_defs.qrm_urls.URL_POST_NEW_REQUEST).respond_with_handler(new_request_handler)\n httpserver.expect_request(qrm_defs.qrm_urls.URL_GET_TOKEN_STATUS).respond_with_json(rrr_json)\n httpserver.expect_request(qrm_defs.qrm_urls.URL_GET_IS_SERVER_UP).respond_with_json({'status': True})\n return httpserver\n\n\n@pytest.fixture(scope='function')\ndef qrm_server_mock_for_client_for_debug(httpserver: HTTPServer, default_test_token) -> HTTPServer:\n def handler(request: Request):\n print('#### start debug print ####')\n print(request)\n print('#### end debug print ####')\n res = Response()\n res.status_code = 200\n return res\n\n def handler_for_wait_for_test(request: Request):\n global wait_for_test_call_times\n rrr_obj = ResourcesRequestResponse()\n rrr_obj.token = default_test_token\n if wait_for_test_call_times > 1:\n rrr_obj.request_complete = True\n rrr_obj.names.append('res1')\n rrr_json = rrr_obj.to_json()\n res = Response(rrr_json, status=200, content_type=\"application/json\")\n wait_for_test_call_times += 1\n return res\n\n httpserver.expect_request(f'{qrm_defs.qrm_urls.URL_GET_ROOT}').respond_with_handler(handler)\n httpserver.expect_request(f'{qrm_defs.qrm_urls.URL_POST_CANCEL_TOKEN}').respond_with_handler(handler)\n httpserver.expect_request(qrm_defs.qrm_urls.URL_GET_TOKEN_STATUS).respond_with_handler(handler_for_wait_for_test)\n yield httpserver\n\n\n@pytest.fixture(scope='function')\ndef qrm_server_mock_for_client_with_error(httpserver: HTTPServer) -> HTTPServer:\n httpserver.expect_request(f'{qrm_defs.qrm_urls.URL_POST_CANCEL_TOKEN}').respond_with_response(Response(status=400))\n return httpserver\n\n\n@pytest.fixture(scope='function')\ndef qrm_http_client_with_server_mock(qrm_server_mock_for_client: HTTPServer) -> QrmClient:\n qrm_client_obj = QrmClient(server_ip=qrm_server_mock_for_client.host,\n server_port=qrm_server_mock_for_client.port,\n user_name='test_user')\n qrm_client_obj.wait_for_server_up()\n return qrm_client_obj\n\n\n@pytest.fixture(scope='function')\ndef qrm_http_client_with_server_mock_debug_prints(qrm_server_mock_for_client_for_debug: HTTPServer) -> QrmClient:\n qrm_client_obj = QrmClient(server_ip=qrm_server_mock_for_client_for_debug.host,\n server_port=qrm_server_mock_for_client_for_debug.port,\n user_name='test_user')\n return qrm_client_obj\n\n\n@pytest.fixture(scope='session')\ndef qrm_backend_mock() -> QueueManagerBackEndMock:\n return QueueManagerBackEndMock()\n\n\n@pytest.fixture(scope='function')\ndef qrm_backend_mock_cls() -> QueueManagerBackEndMock:\n return QueueManagerBackEndMock()\n\n\n@pytest.fixture(scope='session')\ndef resource_dict_1() -> dict:\n return {'name': 'resource_1', 'type': 'server'}\n\n\n@pytest.fixture(scope='session')\ndef resource_dict_2() -> dict:\n return {'name': 'resource_2', 'type': 'server'}\n\n\n@pytest.fixture(scope='session')\ndef resource_dict_3() -> dict:\n return {'name': 'resource_3', 'type': 'server'}\n\n\n@pytest.fixture(scope='function')\ndef resource_foo() -> Resource:\n return Resource(name='foo', type='server')\n\n\n@pytest.fixture(scope='function')\ndef resource_bar() -> Resource:\n return Resource(name='bar', 
type='server')\n\n\n@pytest.fixture(scope='function')\nasync def redis_db_object(redis_my) -> redis_adapter.RedisDB:\n test_adapter_obj = redis_adapter.RedisDB(redis_port=REDIS_PORT, pubsub_polling_time=0.05)\n await test_adapter_obj.init_params_blocking()\n yield test_adapter_obj\n await test_adapter_obj.close()\n del test_adapter_obj\n\n\n@pytest.fixture(scope='function')\nasync def redis_db_object_with_resources(redis_my, resource_foo) -> redis_adapter.RedisDB:\n test_adapter_obj = redis_adapter.RedisDB(redis_port=REDIS_PORT, pubsub_polling_time=0.05)\n await test_adapter_obj.init_params_blocking()\n await test_adapter_obj.add_resource(resource_foo)\n await test_adapter_obj.set_qrm_status(status='active')\n await test_adapter_obj.get_all_resources_dict()\n yield test_adapter_obj\n await test_adapter_obj.close()\n del test_adapter_obj\n\n\n@pytest.fixture(scope='function')\ndef post_to_mgmt_server(event_loop, aiohttp_client):\n app = web.Application()\n management_server.init_redis()\n app.router.add_post(qrm_defs.qrm_urls.ADD_RESOURCES, management_server.add_resources)\n app.router.add_post(qrm_defs.qrm_urls.REMOVE_RESOURCES, management_server.remove_resources)\n app.router.add_get(qrm_defs.qrm_urls.MGMT_STATUS_API, management_server.status)\n app.router.add_post(qrm_defs.qrm_urls.SET_SERVER_STATUS, management_server.set_server_status)\n app.router.add_post(qrm_defs.qrm_urls.SET_RESOURCE_STATUS, management_server.set_resource_status)\n app.router.add_post(qrm_defs.qrm_urls.ADD_TAG_TO_RESOURCE, management_server.add_tag_to_resource)\n app.router.add_post(qrm_defs.qrm_urls.REMOVE_TAG_FROM_RESOURCE, management_server.remove_tag_from_resource)\n app.on_shutdown.append(management_server.close_redis)\n yield event_loop.run_until_complete(aiohttp_client(app))\n\n\n@pytest.fixture(scope='function')\ndef post_to_http_server(event_loop, aiohttp_client):\n app = web.Application()\n qrm_http_server.init_qrm_back_end(QueueManagerBackEndMock())\n app.router.add_post(qrm_defs.qrm_urls.URL_POST_NEW_REQUEST, qrm_http_server.new_request)\n app.router.add_post(qrm_defs.qrm_urls.URL_POST_CANCEL_TOKEN, qrm_http_server.cancel_token)\n app.router.add_get(qrm_defs.qrm_urls.URL_GET_TOKEN_STATUS, qrm_http_server.get_token_status)\n yield event_loop.run_until_complete(aiohttp_client(app))\n\n\n@pytest.fixture(scope='function')\ndef post_to_http_server2(event_loop, aiohttp_server):\n app = web.Application()\n qrm_http_server.init_qrm_back_end(QueueManagerBackEndMock())\n app.router.add_post(qrm_defs.qrm_urls.URL_POST_NEW_REQUEST, qrm_http_server.new_request)\n app.router.add_post(qrm_defs.qrm_urls.URL_POST_CANCEL_TOKEN, qrm_http_server.cancel_token)\n app.router.add_get(qrm_defs.qrm_urls.URL_GET_TOKEN_STATUS, qrm_http_server.get_token_status)\n app.router.add_get(qrm_defs.qrm_urls.URL_GET_ROOT, qrm_http_server.root_url)\n yield event_loop.run_until_complete(aiohttp_server(app))\n\n\n@pytest.fixture(scope='function')\ndef qrm_http_server_for_system(unused_tcp_port_factory) -> dict:\n port = unused_tcp_port_factory()\n p = Process(target=qrm_server.qrm_http_server.run_server, args=(port,))\n p.start()\n yield {'http_port': port}\n p.terminate()\n\n\n@pytest.fixture(scope='function')\ndef qrm_http_server_for_system_pending(unused_tcp_port_factory) -> dict:\n pending = True\n port = unused_tcp_port_factory()\n p = Process(target=qrm_server.qrm_http_server.run_server, args=(port, pending,))\n p.start()\n yield {'http_port': port}\n p.terminate()\n\n\n@pytest.fixture(scope='function')\ndef 
qrm_management_server(unused_tcp_port_factory) -> dict:\n port = unused_tcp_port_factory()\n p = Process(target=qrm_server.management_server.main, kwargs={'listen_port': port})\n p.start()\n yield {'management_port': port}\n p.terminate()\n\n\n@pytest.fixture(scope='function')\nasync def full_qrm_servers_ports(unused_tcp_port_factory, qrm_http_server_for_system,\n qrm_management_server, redis_db_object) -> dict:\n ports_dict = {}\n\n await redis_db_object.add_resource(Resource(name='r1', type='server', status=ACTIVE_STATUS))\n await redis_db_object.add_resource(Resource(name='r2', type='server', status=ACTIVE_STATUS))\n await redis_db_object.add_resource(Resource(name='r3', type='server', status=ACTIVE_STATUS))\n ports_dict.update(qrm_management_server)\n ports_dict.update(qrm_http_server_for_system)\n return ports_dict\n\n\n@pytest.fixture(scope='function')\ndef qrm_client(full_qrm_servers_ports: dict) -> QrmClient:\n client = QrmClient(server_ip='127.0.0.1',\n server_port=full_qrm_servers_ports['http_port'],\n user_name='test_user')\n client.wait_for_server_up()\n return client\n\n\n@pytest.fixture(scope='function')\ndef qrm_client_pending(full_qrm_servers_ports_pending_logic: dict) -> QrmClient:\n client = QrmClient(server_ip='127.0.0.1',\n server_port=full_qrm_servers_ports_pending_logic['http_port'],\n user_name='test_user')\n client.wait_for_server_up()\n return client\n\n\n@pytest.fixture(scope='function')\ndef mgmt_client(full_qrm_servers_ports: dict) -> ManagementClient:\n client = ManagementClient(server_ip='127.0.0.1',\n server_port=full_qrm_servers_ports['management_port'],\n user_name='test_user')\n return client\n\n\n@pytest.fixture(scope='function')\ndef mgmt_client_pending(full_qrm_servers_ports_pending_logic: dict) -> ManagementClient:\n client = ManagementClient(server_ip='127.0.0.1',\n server_port=full_qrm_servers_ports_pending_logic['management_port'],\n user_name='test_user')\n return client\n\n\n@pytest.fixture(scope='function')\ndef full_qrm_servers_ports_pending_logic(unused_tcp_port_factory, qrm_http_server_for_system_pending,\n qrm_management_server, redis_db_object) -> dict:\n ports_dict = {}\n\n r1 = asyncio.gather(redis_db_object.add_resource(Resource(name='r1', type='server',\n status=ACTIVE_STATUS, tags=['server'])))\n r2 = asyncio.gather(redis_db_object.add_resource(Resource(name='r2', type='server',\n status=ACTIVE_STATUS, tags=['server'])))\n r3 = asyncio.gather(redis_db_object.add_resource(Resource(name='r3', type='server',\n status=ACTIVE_STATUS, tags=['server'])))\n r4 = asyncio.gather(redis_db_object.add_resource(Resource(name='v1', type='vlan',\n status=ACTIVE_STATUS, tags=['vlan'])))\n r5 = asyncio.gather(redis_db_object.add_resource(Resource(name='v2', type='vlan',\n status=ACTIVE_STATUS, tags=['vlan'])))\n r6 = asyncio.gather(redis_db_object.add_resource(Resource(name='v3', type='vlan',\n status=ACTIVE_STATUS, tags=['vlan'])))\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(r1)\n loop.run_until_complete(r2)\n loop.run_until_complete(r3)\n loop.run_until_complete(r4)\n loop.run_until_complete(r5)\n loop.run_until_complete(r6)\n ports_dict.update(qrm_management_server)\n ports_dict.update(qrm_http_server_for_system_pending)\n return ports_dict\n\n\n@pytest.fixture(scope='function')\nasync def qrm_backend_with_db(redis_my) -> QueueManagerBackEnd:\n qrm_be = QueueManagerBackEnd(redis_port=REDIS_PORT)\n yield qrm_be\n await 
qrm_be.stop_backend()\n","repo_name":"final-israel/qrm","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":14835,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"13768547680","text":"from glob import glob\n\ndate_num = 0\ncar_total = 0\nimage_total = 0\nbase_path = '/home/centos/smb/jsl/图片数据'\n# base_path = '/Users/clustar/Desktop/项目/data0'\n# f = open('/Users/clustar/Desktop/项目/data_describe.txt', 'w')\nf = open('/home/centos/zhaoliang/data_statistics.txt', 'w')\ndate_list = glob(f'{base_path}/*')\ndate_set = [date.split('/')[-1] for date in date_list]\ndate_set = sorted(date_set)\nfor date_ in date_set:\n if date_.isdigit():\n date_num += 1\nprint(f'当前共有 {date_num} 天的数据')\n\nfor i in range(len(date_set)):\n car_list = glob(f'{base_path}/{date_set[i]}/*')\n car_set = [car.split('/')[-1] for car in car_list]\n if car_set:\n print(f'{date_set[i]}有 {len(car_set)} 车,车牌号是:{car_set}')\n f.write(f'{date_set[i]}有 {len(car_set)} 车,车牌号是:{car_set}'+'\\n')\n car_total = car_total + len(car_set)\nprint(f'目前总共有 {car_total} 车次')\n\nimage_list = glob(f'{base_path}/*/*/*')\nfor image in image_list:\n if image.endswith('jpg'):\n image_total += 1\n\nprint(f'当前数据集共有图片 {image_total} 张')\n\n","repo_name":"8125345/Test","sub_path":"cal_card_num.py","file_name":"cal_card_num.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16395765842","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable # Variable has been depricated. Now it just creates a tensor\n# https://pytorch.org/docs/stable/autograd.html#variable-deprecated\n\n\nclass LM_LSTM(nn.Module):\n \"\"\"Simple LSMT-based language model\"\"\"\n def __init__(self, hidden_dim, embedding_dim, num_steps, batch_size, vocab_size, num_layers, dp_keep_prob,\n bidirectional):\n super(LM_LSTM, self).__init__()\n self.hidden_dim = hidden_dim\n self.embedding_dim = embedding_dim\n self.bidirectional = bidirectional\n self.num_steps = num_steps\n self.batch_size = batch_size\n self.vocab_size = vocab_size\n self.dp_keep_prob = dp_keep_prob\n self.num_layers = num_layers\n self.dropout = nn.Dropout(1 - dp_keep_prob)\n self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(input_size=embedding_dim,\n hidden_size=hidden_dim,\n num_layers=num_layers,\n dropout=1 - dp_keep_prob,\n bidirectional=bidirectional)\n if self.bidirectional:\n self.num_directions = 2 # The number of directions of this LSTM (Bidirections is 2)\n else:\n self.num_directions = 1 # The number of directions of this LSTM (One direction is 1)\n self.linear_input = self.num_directions * self.hidden_dim\n self.sm_fc = nn.Linear(in_features=self.linear_input,\n out_features=vocab_size)\n self.init_weights()\n self.direction = None\n\n def init_weights(self):\n init_range = 0.1\n self.word_embeddings.weight.data.uniform_(-init_range, init_range)\n self.sm_fc.bias.data.fill_(0.0)\n self.sm_fc.weight.data.uniform_(-init_range, init_range)\n\n def init_hidden(self, batch_size=None):\n if batch_size is None:\n batch_size = self.batch_size\n weight = next(self.parameters()).data\n return (Variable(weight.new(self.num_layers * self.num_directions, batch_size, self.hidden_dim).zero_()),\n Variable(weight.new(self.num_layers * self.num_directions, batch_size, self.hidden_dim).zero_()))\n\n def forward(self, inputs, hidden, num_steps=None, batch_size=None):\n if 
num_steps is None:\n            num_steps = self.num_steps\n        if batch_size is None:\n            batch_size = self.batch_size\n        embeds = self.dropout(self.word_embeddings(inputs))\n        lstm_out, hidden = self.lstm(embeds, hidden)\n        lstm_out = self.dropout(lstm_out)\n        logits = self.sm_fc(lstm_out.view(-1, self.linear_input))\n        output = logits.view(num_steps, batch_size, self.vocab_size)\n        return output, hidden\n\n\ndef repackage_hidden(h):\n    \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n    if type(h) == torch.Tensor:  # Variable is deprecated, so plain tensors are checked instead\n        return Variable(h.data)\n    else:\n        return tuple(repackage_hidden(v) for v in h)\n","repo_name":"NedaTavakoli/Modeling-and-Clustering-Genome-using-Bidirectional-LSTM","sub_path":"forward_backward_model/lm.py","file_name":"lm.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
+{"seq_id":"29093859662","text":"# . Simulate a supermarket checkout: unit price and item count\n# \tRead the unit price and the item count from the keyboard,\n# \t then compute the total amount due\n# \t the total is computed as a float\n# \tPrompt the user with the 4 available payment methods\n# \t each payment method has a different discount; show the four methods first\n# \t \tcash: no discount\n# \t\tWeChat: pay 95% of the total (x0.95)\n# \t \tAlipay: a reward of 10% of the amount, deducted directly from the payment\n# \t \tcard: 20 off for every full 100\n\nprice = float(input('Enter the unit price:\\n'))\ncount = int(input('Enter the item count'))\n# total price\nsum_price = price * count\npay_method = int(input('Choose a payment method (1 cash, 2 WeChat, 3 Alipay, 4 card)'))\nif pay_method == 1:\n    print('Cash, no discount: {}'.format(sum_price))\nelif pay_method == 2:\n    print('WeChat, pay 95%: {}'.format(sum_price * 0.95))\nelif pay_method == 3:\n    print('Alipay, 10% reward deducted: {}'.format(sum_price * 0.9))\nelif pay_method == 4:\n    print('Card, 20 off per full 100: {}'.format(sum_price - sum_price//100 * 20))\n","repo_name":"zhangwei725/PythonBase","sub_path":"day06/homework_超市.py","file_name":"homework_超市.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"24721044075","text":"\nimport sys\n\nlengths = [ord(x) for x in sys.stdin.read().strip()] + [17, 31, 73, 47, 23]\n\nnums = [i for i in range(256)]\n\ndef rev(arr, i, j):\n    while i < j:\n        tmp = arr[i % len(arr)]\n        arr[i % len(arr)] = arr[j % len(arr)]\n        arr[j % len(arr)] = tmp\n        i += 1\n        j -= 1\n\npos = 0\nskip = 0\nfor _ in range(64):\n    for length in lengths:\n        rev(nums, pos, pos + length - 1)\n        pos += length + skip\n        skip += 1\n\nres = ''\nfor i in range(16):\n    v = 0\n    for j in range(16):\n        v ^= nums[16 * i + j]\n    res += '{:02x}'.format(v)\n\nprint('Result:', res)\n\n","repo_name":"rolandbernard/adventofcode-all","sub_path":"2017/10.knot-hash/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5045566317","text":"def fib():\r\n    a,b = 1, 1\r\n    while True:\r\n        yield \"{}\".format(a)\r\n        a, b = b, a + b\r\nplace2 = int(input(\"how many decimal places? 
\"))\r\nprevious = 0\r\ncount = 0\r\nchecker = 0\r\nplace = 0\r\nimport decimal\r\nimport time\r\nstart_time = time.time()\r\nwith decimal.localcontext() as ctx:\r\n for item in fib():\r\n if count != 0:\r\n ctx.prec = place+1\r\n item = decimal.Decimal(item)\r\n previous = decimal.Decimal(previous)\r\n golden = decimal.Decimal(item)/decimal.Decimal(previous) \r\n if checker == golden:\r\n if place != place2:\r\n place+=1\r\n place3 = place-1\r\n print(\"{0}: {1}\\n\".format(place3, golden))\r\n else:\r\n print(\"{0}: {1}\\n\".format(place, golden))\r\n break\r\n checker = golden\r\n previous = item\r\n count+=1\r\nend_time = time.time()\r\nprint(end_time - start_time)\r\nprint(\"took {0} iterations\".format(count-1))\r\nf = open(\"{0} golden ratio.txt\".format(place2), \"w\")\r\nf.write(\"{0}\".format(golden))\r\nf.close()\r\ninput()\r\n","repo_name":"lolzhunter/python-trash","sub_path":"golden ratio using fibonacci but the decimals constantly increase.py","file_name":"golden ratio using fibonacci but the decimals constantly increase.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69883776723","text":"# NEW LIBRARIES\nimport pandas as pd # Pandas Python library\nfrom sklearn.feature_extraction.text import (CountVectorizer, TfidfTransformer) # Sklearn Python library\nfrom sklearn.metrics.pairwise import cosine_similarity # Imports Cosine distance measure (other possibilities:\n # Euclidean and Manhattan)\n\nfrom sklearn.feature_extraction import text ### The Sklearn library has stopwords of its own, which is a \n ### frozenset that can be used with the function text.ENGLISH_STOP_WORDS. Additional items can\n ### be added to that list: text.ENGLISH_STOP_WORDS.union(additional_stopwords). \nfrom nltk.corpus import stopwords ### The NLTK library has its own stopwords package, too (requires \n ### nltk downloader to be installed). \n ### NB: Alternatively, stopwords can be removed manually in pre-processing.\n\nimport os, json, re, sys # NB: sys module allows code to introspect on the system in which its running\n # re for regular expressions\n\n# SCRIPT WITH OUR PREVIOUS FUNCTIONS\nimport functions\n\n###########################################################\n# VARIABLES ###############################################\n###########################################################\n\nsettings = functions.loadYmlSettings(\"settings.yml\")\nmy_stopwords_list = stopwords.words('english') + stopwords.words('french') ### Personalised stopword list \n ### combines English and French stopwords. 
\nmy_stopwords_list.extend('op pp ch dun dune ibid cf iv xvi xix xviii xix'.split()) ### Adds any other stopwords.\n\n###########################################################\n# MAIN FUNCTIONS ##########################################\n###########################################################\n\ndef filterTfidfDictionary(dictionary, threshold, lessOrMore): # This does the filtering with the\n # parameters dictionary, threshold and lessOrMore.\n dictionaryFilt = {} # Creates the dictionary.\n for item1, citeKeyDist in dictionary.items():\n dictionaryFilt[item1] = {} # Creates a dictionary.\n for item2, value in citeKeyDist.items():\n if lessOrMore == \"less\": # The 'less' argument\n if value <= threshold: # filters out items that are below the threshold.\n if item1 != item2:\n dictionaryFilt[item1][item2] = value\n elif lessOrMore == \"more\": # The 'more' argument\n if value >= threshold: # filters out items that are above the threshold.\n if item1 != item2:\n dictionaryFilt[item1][item2] = value\n else:\n sys.exit(\"`lessOrMore` parameter must be `less` or `more`\") # When the argument is neither\n # less nor more, the execution of the script stops. Prints an error message on the\n # screen. When done right, the execution of the script will never stop, but this \n # safeguards against mistakes.\n\n if dictionaryFilt[item1] == {}: # Filtering out items from the dictionary leaves empty items. This\n # removes items that hold no information whatsoever.\n dictionaryFilt.pop(item1)\n return(dictionaryFilt) # Results in long json dictionary files.\n\n\ndef tfidfPublications(pathToMemex): # tfidfPublications is the function\n # PART 1: loading OCR files into a corpus\n ocrFiles = functions.dicOfRelevantFiles(pathToMemex, \".json\") # Gets the dictionary of files with \n # OCR results. The .json file extension is not ideal for keeping files separated,\n # so use a non-existent file extension instead, such as .OCRED, when generating \n # the corpus.\n citeKeys = list(ocrFiles.keys())#[:500] # Gets the cite keys from the dictionary, providing more\n # freedom for processing the information. \n\n print(\"\\taggregating texts into documents...\")\n docList = [] # Creates 2 lists: document list for documents\n docIdList = [] # and a list of Ids of the documents.\n\n for citeKey in citeKeys: # Loops through the citekyes\n docData = json.load(open(ocrFiles[citeKey])) # Reads every publication one after the other. 
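For example, a hypothetical OCR record {\"1\": \"first page\", \"2\": \"second page\"} is joined below into the single string \"first page second page\".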
\n # docData loads the json file.\n # IF YOU ARE ON WINDOWS, THE LINE SHOULD BE:\n # docData = json.load(open(ocrFiles[citeKey], \"r\", encoding=\"utf8\"))\n \n docId = citeKey # docId is a key in the cite key\n doc = \" \".join(docData.values()) # Grabs all of the pages from the loaded json dictionary and \n # merges it.\n\n # clean doc\n # NB: Place any text pre-processing steps here\n doc = re.sub(r'(\\w)-\\n(\\w)', r'\\1\\2', doc) # word regular expression\n doc = re.sub(r'\\W+', ' ', doc) # not word regular expression\n doc = re.sub('_+', ' ', doc) \n doc = re.sub(r'\\d+', ' ', doc) # digit regular expression\n doc = re.sub(' +', ' ', doc)\n\n # update lists\n docList.append(doc) # Updates 2 lists with ids \n docIdList.append(docId) # and corresponding complete documents.\n\n print(\"\\t%d documents generated...\" % len(docList))\n\n # PART 2: calculate tfidf metric for all loaded publications and distances\n # Sklearn and pandas library vectorise the corpus and generate these 2 large matrixes with distances\n # and tf-idf values.\n print(\"\\tgenerating tfidf matrix & distances...\") \n vectorizer = CountVectorizer(ngram_range=(1,1), min_df=5, max_df=0.5, stop_words=my_stopwords_list) \n # Functions in Sklearn have a\n # variety of parameters. The ngram range can be changed to (1,2) to take\n # bigrams into account. Minimum and maximum document frequency is the \n # filterting for the calculations. \n # Stopwords can also be used as parameters, i.e.: stop_words=english. \n # Stopwords need to be explicitly declared, stopword lists can be merged together.\n # Min_df=5 integer exludes all the vocabulary that is used in less than 5\n # texts in the corpus. Max_df=0.5 float is a percentage that excludes all \n # the vocabulary that is used in more than half of the text.\n countVectorized = vectorizer.fit_transform(docList) # Takes all of the documents and creates a vector\n # from all of words. Each text will be mapped against that vector for words. \n tfidfTransformer = TfidfTransformer(smooth_idf=True, use_idf=True) # Function has parameters to modify it.\n # smooth_idf adds extra steps and fits in as a value between 0 and 1. \n vectorized = tfidfTransformer.fit_transform(countVectorized) # Generates a sparse matrix of the tf-idf\n # values (countVectorized)\n cosineMatrix = cosine_similarity(vectorized) # Creates the matrix of the cosine similarities. \n # NB: Matrixes as a data format speed up these calculations, but need to be converted into a dictionary.\n\n # PART 3: saving TFIDF values\n print(\"\\tsaving tfidf data...\")\n tfidfTable = pd.DataFrame(vectorized.toarray(), index=docIdList, columns=vectorizer.get_feature_names())\n # Generates a data frame from the matrix in the table.\n tfidfTable = tfidfTable.transpose() # Transposes the table. 
The original matrix consists of \n # rows and columns, so it needs to be transposed so that each column is \n # a document and each row is information on the terms.\n print(\"\\ttfidfTable Shape: \", tfidfTable.shape)\n tfidfTableDic = tfidfTable.to_dict() # Converts it into a dictionary\n\n tfidfTableDicFilt = filterTfidfDictionary(tfidfTableDic, 0.05, \"more\") # The filtering function takes\n # the dictionary, takes the value for filtering (which is just 'more', an extra \n # switch to keep the values above or below the threshold).\n pathToSave = os.path.join(pathToMemex, \"results_tfidf.dataJson\") # Saves the result.\n with open(pathToSave, 'w', encoding='utf8') as f9:\n json.dump(tfidfTableDicFilt, f9, sort_keys=True, indent=4, ensure_ascii=False) # Dumps the results \n # into a json file.\n\n # PART 3: saving cosine distances\n print(\"\\tsaving cosine distances data...\")\n cosineTable = pd.DataFrame(cosineMatrix) # Generates a data frame from the cosine matrix.\n print(\"\\tcosineTable Shape: \", cosineTable.shape)\n cosineTable.columns = docIdList # Defines columns in cosine table.\n cosineTable.index = docIdList # Defines index in cosine table.\n cosineTableDic = cosineTable.to_dict() # Converts the data frame into a dictionary.\n\n tfidfTableDicFilt = filterTfidfDictionary(cosineTableDic, 0.25, \"more\") # Filtering function takes the\n # cosine table dictionary and the value for filtering (\"more\" to keep the values above\n # the threshold).\n pathToSave = os.path.join(pathToMemex, \"results_cosineDist.dataJson\") # Saves the results.\n with open(pathToSave, 'w', encoding='utf8') as f9: # Dumps the results\n json.dump(tfidfTableDicFilt, f9, sort_keys=True, indent=4, ensure_ascii=False) # into a json file.\n\ntfidfPublications(settings[\"path_to_memex\"])","repo_name":"dashaevsina/MEMEX_SANDBOX","sub_path":"_misc/3_TFIDF_Distance_annotated.py","file_name":"3_TFIDF_Distance_annotated.py","file_ext":"py","file_size_in_byte":9746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32864198794","text":"import sys\nimport os\nimport re\nimport pickle\n\nimport numpy as np\n\n\ndef extract_segments(arktxt):\n segments = []\n\n with open(arktxt, encoding=\"utf-8\") as ark:\n for line in ark:\n # Extract data fields\n _, time_info, data = list(\n re.match(\n r\"(.*)_(\\d{6}.\\d{3}_\\d{6}.\\d{3}-\\d{8}-\\d{8}) (.*)\", line\n ).groups())\n\n # Get timestamps for vad segment & xvector segment\n vad_ts, seg_ts = time_info.split(\"-\", 1)\n\n # Get start/end times in seconds\n vad_start, vad_end = map(float, vad_ts.split(\"_\"))\n seg_start, seg_end = map(lambda x: float(x) / 100,\n seg_ts.split(\"-\"))\n\n # Calculate absolute start/end time for the xvector segment\n start = (vad_start + seg_start)\n end = (vad_start + seg_end)\n\n # Extract xvector data\n data = re.sub(r\"(\\[|\\])\", \"\", data).strip()\n data = np.fromstring(data, dtype=float, sep=\" \")\n\n segments.append((start, end, data))\n\n return segments\n\n\nclass SegmentsHandler:\n\n def __init__(self, xvectors, seg_every=0.75):\n self._xvectors = xvectors\n self._seg_every = seg_every\n\n def find_segments(self, start, end):\n segments = []\n\n for seg_start, seg_end, xvector in self._xvectors:\n if seg_start >= start and seg_end <= end:\n segments.append(xvector)\n\n return segments\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 5:\n print(\"usage: python3 filter-unvoiced-segments.py \"\n \" \")\n sys.exit(1)\n\n thr = float(sys.argv[1])\n model_path = 
sys.argv[2]\n    rttm_dir = sys.argv[3]\n    arktxt = sys.argv[4]\n\n    handler = SegmentsHandler(extract_segments(arktxt))\n\n    with open(model_path, \"rb\") as f:\n        clf, class_names = pickle.load(f)\n\n    filtered_rttm = open(os.path.join(rttm_dir, \"rttm.filt\"), \"w\")\n\n    with open(os.path.join(rttm_dir, \"rttm\")) as f:\n        for line in f:\n            parts = line.split()\n            start = float(parts[3])\n            end = start + float(parts[4])\n\n            xvectors = handler.find_segments(start, end)\n\n            if xvectors:\n                decision = clf.predict(xvectors)\n\n                if decision.mean() < thr:\n                    continue\n                else:\n                    filtered_rttm.write(line)\n            else:\n                # Keep the line if there is no xvector found for the segment\n                filtered_rttm.write(line)\n\n    filtered_rttm.close()\n","repo_name":"bbc/bbc-speech-segmenter","sub_path":"recipe/local/filter_unvoiced_segments.py","file_name":"filter_unvoiced_segments.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"3"}
+{"seq_id":"31558090648","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.constants import c\nfrom scipy.interpolate import splrep\nfrom scipy.interpolate import splev\nfrom scipy.interpolate import bisplrep\nfrom scipy.interpolate import bisplev\nfrom scipy.interpolate import RectBivariateSpline\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.interpolate import spline\nfrom scipy.signal import fftconvolve\n\nZERO_TOLERANCE = 1e-6\n\n\ndef flatten_2d(arr):\n    \"\"\"\n    Flattens 2-dim array\n\n    :param arr: 2d array\n    :return:\n    \"\"\"\n    newarr = []\n    if any([isinstance(subarr, (list, tuple)) for subarr in arr]):\n        for subarr in arr:\n            if isinstance(subarr, (tuple, list)):\n                newarr.extend(subarr)\n            else:\n                newarr.append(subarr)\n        return newarr\n    else:\n        return arr\n\n\ndef instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True):\n    \"\"\"\n\n    A convolution of a spectrum with a normal distribution.\n    :param wave: wavelength array\n    :param flux: flux array\n    :param width: width of the broadening kernel\n    :param width_type: 'fwhm' or 'sigma'\n    :return:\n    \"\"\"\n    # print \"Computing instr. broadening.\"\n    # If there is no broadening to apply, don't bother\n    if width < ZERO_TOLERANCE:\n        return flux\n\n    # Convert user input width type to sigma (standard deviation)\n    width_type = width_type.lower()\n    if width_type == 'fwhm':\n        sigma = width / 2.3548\n    elif width_type == 'sigma':\n        sigma = width\n    else:\n        raise ValueError((\"Unrecognised width_type='{}' (must be one of 'fwhm'\"\n                          \"or 'sigma')\").format(width_type))\n\n    # Make sure the wavelength range is equidistant before applying the\n    # convolution\n    delta_wave = np.diff(wave).min()\n    range_wave = wave.ptp()\n    n_wave = int(range_wave / delta_wave) + 1\n    wave_ = np.linspace(wave[0], wave[-1], n_wave)\n    # flux_ = np.interp(wave_, wave, flux)\n    flux_ = interpolate_spec(wave, flux, wave_)\n    dwave = wave_[1] - wave_[0]\n    n_kernel = int(2 * 4 * sigma / dwave)\n\n    # The kernel might be of too low resolution, or the wavelength range\n    # might be too narrow. 
In both cases, raise an appropriate error\n    if n_kernel == 0:\n        raise ValueError((\"Spectrum resolution too low for \"\n                          \"instrumental broadening (delta_wave={}, \"\n                          \"width={}\").format(delta_wave, width))\n    elif n_kernel > n_wave:\n        raise ValueError((\"Spectrum range too narrow for \"\n                          \"instrumental broadening\"))\n\n    # Construct the broadening kernel\n    wave_k = np.arange(n_kernel) * dwave\n    wave_k -= wave_k[-1] / 2.\n    kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2))\n    kernel /= sum(kernel)\n\n    # Convolve the flux with the kernel\n    flux_conv = fftconvolve(1 - flux_, kernel, mode='same')\n\n    # And interpolate the results back onto the original wavelength array,\n    # taking care of even vs. odd-length kernels\n    if n_kernel % 2 == 1:\n        offset = 0.0\n    else:\n        offset = dwave / 2.0\n\n    if interpolate_back:\n        flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1)\n        # flux = interpolate_spec(wave_, 1-flux_conv, wave+offset)\n    # Return the results.\n    return flux\n\n\ndef interpolate_block(x, block, xnew):\n    \"\"\"\n    Interpolates in each line of a 2d array.\n\n    :param x: independent variable\n    :type x: numpy.float64\n    :param block: 2d array for each column f(x)= block[i]\n    :type block: numpy.float64\n    :param xnew: point at which it is interpolated\n    :type xnew: float\n    :return:\n    \"\"\"\n    intens = np.zeros(len(block[0]))\n    n = len(block[:, 0])\n\n    # set up the order of interpolation\n    if n > 4:\n        k = 3\n    else:\n        k = n - 1\n    # k=3\n\n    # TODO Can this be done faster with bisplrep and bisplev\n    # do the interpolation\n    for i in range(0, len(block[0])):\n        y = block[:, i]\n\n        tck = splrep(x, y, k=k)\n        intens[i] = splev(xnew, tck, der=0)\n\n    return intens\n\n\ndef interpolate_block_faster(x, block, xnew):\n    \"\"\"\n    Interpolation of the spectra... 
hopefully faster?\n\n :param x:\n :param block:\n :param xnew:\n :return:\n \"\"\"\n\n # length of the datablock\n nx = len(block[0])\n ny = len(x)\n # print x\n\n if (ny > 3) & (ny < 6):\n ky = 3\n elif ny > 5:\n ky = 5\n else:\n ky = ny - 1\n\n # print ky\n\n f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1)\n intens = f(xnew, np.arange(nx))[0]\n\n return intens\n\n\ndef interpolate_spec(wave0, intens0, wave1):\n \"\"\"\n Defines a function intens0 = f(wave0) and\n than interpolates in it at wave1.\n\n :param wave0: initial wavelength array\n :type wave0: numpy.float64\n :param intens0: initial intensity array\n :type intens0: numpy.float64\n :param wave1: wavelength array at which we interpolate\n :type wave1: numpy.float64\n :return intens1: final intensity array\n :rtype intens1: numpy.float64\n \"\"\"\n tck = splrep(wave0, intens0, k=3)\n intens1 = splev(wave1, tck)\n\n return intens1\n\n\ndef is_within_interval(v, arr):\n \"\"\"\n Tests whether value v lies within interval [min(arr); max(arr)]\n\n :param v: tested values\n :type v: numpy.float64\n :param arr: tested array\n :type v: numpy.float64\n :return:\n :param:\n :type: bool\n \"\"\"\n # print v, max(arr), min(arr)\n if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE):\n return False\n else:\n return True\n\n\ndef generate_least_number(l):\n \"\"\"\n Goes over integer in list and finds the\n smallest integer not in the list.\n\n :param l: the list\n :return: int the smallest integer\n \"\"\"\n num = 0\n while num in l:\n num += 1\n return num\n\n\ndef keys_to_lowercase(d):\n \"\"\"\n Converts dictionary keys to lowercase\n\n :param d the converted dictionary\n :return: dnew\n \"\"\"\n\n dnew = {}\n for key in d.keys():\n keynew = key.lower()\n dnew[keynew] = d[key]\n\n return dnew\n\n\ndef parlist_to_list(l, property='value'):\n \"\"\"\n Converts a list of Parameter class to a\n regular list - only the property is returned\n\n :param l:\n :param prop:\n :return:\n \"\"\"\n ol = []\n for par in l:\n ol.append(par[property])\n\n return ol\n\n\ndef sum_dict_keys(d):\n \"\"\"\n Sums dictionary key records.\n\n :param d: the dictionary\n :return: s the sum\n \"\"\"\n s = 0.0\n for key in d.keys():\n s += d[key]\n return s\n\n\ndef read_text_file(f):\n \"\"\"\n Reads ascii file f.\n\n :param f: the file\n :type f: str\n :return lines: list of all lines within file f\n :rtype: list\n \"\"\"\n\n ifile = open(f, 'r')\n lines = ifile.readlines()\n ifile.close()\n\n return lines\n\n\ndef renew_file(f):\n \"\"\"\n Deletes an existing file.\n\n :param f:\n :return:\n \"\"\"\n ofile = open(f, 'w')\n ofile.close()\n\n\ndef rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True):\n \"\"\"\n Rotates a spectrum represented by arrays wave and intes to the prjected\n rotational velocity vrot.\n\n :param wave: wavelength array\n :type wave: numpy.float64\n :param intens: intensity array\n :type intens: numpy.float64\n :param vrot: projected rotational velocity in km/s\n :type vrot: float\n :param epsilon: Coefficient of linear limb-darkening.\n :type epsilon: float\n :param interpolate_back: interpolate the spectrum back to the original wavelength sampling\n :type interpolate_back: bool\n :return intens: the rotated spectrum in the original wavelength sanmpling\n :rtype intens: numpy.float64\n :return intens_conv: the rotated spectrum equidistant in rv\n :rtype intens_conv: numpy.float64\n :return wave_conv: the wavelength array equidistant in rv\n :rtype wave_conv: numpy.float64\n \"\"\"\n if vrot > 
ZERO_TOLERANCE:\n # we need it equidistant in RV\n wave_log = np.log(wave)\n rv = np.linspace(wave_log[0], wave_log[-1], len(wave))\n step = rv[1] - rv[0]\n\n # interpolate\n intens_rv = interpolate_spec(wave_log, intens, rv)\n\n # scale rotational velocity with light speed\n vrot = 1000 * vrot / c.value\n\n # get the kernel\n # velocity vector\n n = int(np.ceil(2 * vrot / step))\n rv_ker = np.arange(n) * step\n rv_ker = rv_ker - rv_ker[-1] / 2.\n y = 1 - (rv_ker / vrot) ** 2\n\n # the kernel\n kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0))\n kernel = kernel / kernel.sum()\n\n # convolve the flux\n intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same')\n if n % 2 == 1:\n rv = np.arange(len(intens_conv)) * step + rv[0]\n else:\n rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2.\n\n wave_conv = np.exp(rv)\n\n # interpolate back\n if interpolate_back:\n intens = interpolate_spec(wave_conv, 1 - intens_conv, wave)\n return intens\n else:\n return 1 - intens_conv, wave_conv\n\n\ndef shift_spectrum(wave, RV):\n \"\"\"\n Doppler-shifts spectrum.\n :param wave: original wavelength array\n :type wave: numpy.float64\n :param RV: radial velocity in km/s\n :type RV: float\n :return new_wave: shifted wavelength array\n :rtype new_wave: numpy.float64\n\n \"\"\"\n # shifts the wavelengths\n new_wave = wave * (1 + RV * 1000 / c.value)\n\n return new_wave\n\n\ndef select_index_for_multiple_keywords(d, **kwargs):\n \"\"\"\n From a dictionary of lists selects\n one index meeting all requirements.\n\n :param kwargs:\n :return:\n \"\"\"\n keys = d.keys()\n length = len(d[keys[0]])\n\n for i in range(0, length):\n for k in keys:\n if d[k] == kwargs[k] and k == keys[-1]:\n return i\n return -1\n\n\ndef string2bool(s):\n \"\"\"\n Converts string to boolean.\n\n :param s:\n :return:\n \"\"\"\n if s.lower() in ['true', '1']:\n return True\n else:\n return False\n\n\ndef write_numpy(f, cols, fmt):\n \"\"\"\n An example of lack of brain of the main developer of this \"code\".\n\n :param f: outputfile or handler\n :param cols: block of data to be writte\n :param fmt: format of the blocs\n :return: None\n \"\"\"\n\n np.savetxt(f, cols, fmt=fmt)\n","repo_name":"chrysante87/pyterpol","sub_path":"synthetic/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":10363,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"20298813203","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 1 22:17:33 2023\r\n\r\n@author: HTony03\r\n\"\"\"\r\n\r\n\r\n\r\nimport pandas\r\nimport random,configparser\r\nimport os,base64\r\nfrom configparser import NoOptionError\r\n\r\ncfg_file = r\"C:\\\\random.cfg\"\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read(cfg_file)\r\nrouteall = config.get(\"csv_route\",\"csv_all\")\r\nrouteboys = config.get(\"csv_route\",\"csv_boys\")\r\nroutegirls = config.get(\"csv_route\",\"csv_girls\")\r\nlenall = config.get(\"csv_route\",\"csv_all_len\")\r\nlenboys = config.get(\"csv_route\",\"csv_boys_len\")\r\nlengirls = config.get(\"csv_route\",\"csv_girls_len\")\r\nlanguages = config.get(\"random_configs\",\"language\")\r\ndef readcsv(route):\r\n return pandas.read_csv(os.path.join(os.getcwd(),route),encoding=\"ANSI\",index_col=False)\r\n\r\ndef createcsv(lang):\r\n while 1:\r\n try:\r\n if lang == 1:\r\n model = int(input(\"copy from which name model(1:all students 2:girls 3:boys):\"))\r\n elif lang == 2:\r\n model = 
int(input(\"从哪个学生模型复制(1:所有学生 2:女同学 3:男同学):\"))\r\n except ValueError:\r\n if lang == 1:\r\n print(\"Input error\")\r\n else:\r\n print(\"输入错误\")\r\n else:\r\n if model != 1 and model != 2 and model != 3:\r\n if lang == 1:\r\n print(\"Input error\")\r\n else:\r\n print(\"输入错误\")\r\n else:\r\n break\r\n \r\n if lang == 1:\r\n name = input(\"the model name:\")\r\n while 1:\r\n try:\r\n index = int(input(\"index of the model:\"))\r\n except ValueError:\r\n print(\"Input error\")\r\n else:\r\n if index != 1 and index != 2 and index != 3:\r\n print(\"Input error\")\r\n else:\r\n break\r\n while 1:\r\n\r\n del_name = input(\"delete name after chosen it(True:yes,False:no):\")\r\n if del_name != \"True\" and del_name != \"False\":\r\n print(\"输入错误\")\r\n else:\r\n break\r\n else:\r\n name = input(\"模型名称:\")\r\n while 1:\r\n try:\r\n index = int(input(\"模型序号:\"))\r\n except ValueError:\r\n print(\"输入错误\")\r\n else:\r\n if index != 1 and index != 2 and index != 3:\r\n print(\"输入错误\")\r\n else:\r\n break\r\n while 1:\r\n del_name = input(\"摇到名字后是否删除名字(True:是,False:否):\")\r\n if del_name != \"True\" and del_name != \"False\":\r\n print(\"输入错误\")\r\n else:\r\n break\r\n #copy csv\r\n if model == 1:\r\n names = readcsv(routeall)\r\n elif model == 2:\r\n names = readcsv(routeboys)\r\n elif model == 3:\r\n names = readcsv(routeboys)\r\n try:\r\n names.to_csv(r\"\"\"C:\\rand\\ \"\"\"+name+\".csv\",mode=\"x\",encoding=\"ANSI\",index=False)\r\n except FileExistsError:\r\n name = \"custom_config\"+str(index)\r\n names.to_csv(r\"\"\"C:\\rand\\ \"\"\"+name+\".csv\",mode=\"x\",encoding=\"ANSI\",index=False)\r\n #\r\n cfg_file = r\"C:\\\\random.cfg\"\r\n conf = configparser.ConfigParser()\r\n cfgfile = open(cfg_file,\"r+\")\r\n conf.read(cfg_file)\r\n conf.set(\"csv_route\",\"self_config_csv\"+str(index)+\"_name\",name)\r\n conf.set(\"csv_route\",\"self_config_csv\"+str(index)+\"_len\",str(len(names)))\r\n conf.set(\"csv_route\",\"self_config_csv\"+str(index)+\"_route\",r\"\"\"C:\\rand\\ \"\"\"+name+\".csv\")\r\n conf.set(\"csv_route\",\"self_config_csv\"+str(index)+\"_delete_name\",del_name)\r\n conf.write(cfgfile)\r\n cfgfile.close()\r\n \r\n print(\"new csv created as \"+name+\".csv in the rand folder\")\r\nif languages == \"Chinese\":\r\n lang = 2\r\nelse:\r\n lang = 1\r\ncreatecsv(lang)\r\nos.system(\"pause\")","repo_name":"HTony03/Roll-counter","sub_path":"create_new_custom_csv.py","file_name":"create_new_custom_csv.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"885263434","text":"import unittest\nfrom math import radians\n\nfrom geometry_msgs.msg import Point\n\nfrom pilz_robot_programming import *\n\nfrom pilz_industrial_motion_testutils.integration_test_utils import *\n\nAPI_VERSION = \"1\"\n\n_JOINT_LIMITS_DEGREE = {\n 'prbt_joint_1': 170,\n 'prbt_joint_2': 145,\n 'prbt_joint_3': 135,\n 'prbt_joint_4': 170,\n 'prbt_joint_5': 170,\n 'prbt_joint_6': 179\n}\n_JOINT_POSITIONS_TOLERANCE = 0.001\n_JOINT_LIMIT_OVERSTEP = 0.1\n\n\nclass TestJointPositionLimits(unittest.TestCase):\n\n def setUp(self):\n self.robot = Robot(API_VERSION)\n self.joint_names = _JOINT_LIMITS_DEGREE.keys()\n self.joint_names = sorted(self.joint_names)\n\n def tearDown(self):\n if hasattr(self, 'robot'):\n self.robot._release()\n self.robot = None\n\n def _check_joint_limits(self):\n \"\"\" Check if current joint positions are within the limits.\n \"\"\"\n positions = self.robot.get_current_joint_states()\n\n for i in 
range(len(self.joint_names)):\n name = self.joint_names[i]\n position = positions[i]\n limit = radians(_JOINT_LIMITS_DEGREE[name])+_JOINT_POSITIONS_TOLERANCE\n\n self.assertGreater(position, -limit, 'Joint ' + name + ' violates lower limit. Position: ' + str(position))\n self.assertLess(position, limit, 'Joint ' + name + ' violates upper limit. Position: ' + str(position))\n\n def _joint_limit_reaching_test(self, joint_name):\n \"\"\" Test if the robot can be commanded to move exactly to the limits\n\n Test Sequence:\n 1. Command a movement to the lower limit.\n 2. Command a movement to the upper limit.\n\n Expected Results:\n 1. Trajectory is executed successfully.\n 2. Trajectory is executed successfully.\n \"\"\"\n index = self.joint_names.index(joint_name)\n limit = _JOINT_LIMITS_DEGREE[joint_name]\n\n lower_positions = [0] * len(self.joint_names)\n lower_positions[index] = -radians(limit)\n\n try:\n self.robot.move(Ptp(goal=lower_positions))\n except RobotMoveFailed:\n self.fail('Failed moving exactly to lower limit')\n\n upper_positions = [0] * len(self.joint_names)\n upper_positions[index] = radians(limit)\n\n try:\n self.robot.move(Ptp(goal=upper_positions))\n except RobotMoveFailed:\n self.fail('Failed moving exactly to upper limit')\n\n def _joint_limit_overstepping_test(self, joint_name):\n \"\"\" Test if the robot does not overstep the limits\n\n Test Sequence:\n 1. Command a movement to the home position.\n 2. Command a movement overstepping the lower limit.\n 3. Command a movement overstepping the upper limit.\n\n Expected Results:\n 1. Trajectory is executed successfully.\n 2. Trajectory execution is aborted and the robot does not overstep the limits.\n 3. Trajectory execution is aborted and the robot does not overstep the limits.\n \"\"\"\n index = self.joint_names.index(joint_name)\n limit = _JOINT_LIMITS_DEGREE[joint_name]\n\n lower_positions = [0] * len(self.joint_names)\n lower_positions[index] = -(radians(limit) + _JOINT_LIMIT_OVERSTEP)\n\n self.assertRaises(RobotMoveFailed, self.robot.move, Ptp(goal=lower_positions))\n self._check_joint_limits()\n\n upper_positions = [0] * len(self.joint_names)\n upper_positions[index] = radians(limit) + _JOINT_LIMIT_OVERSTEP\n\n self.assertRaises(RobotMoveFailed, self.robot.move, Ptp(goal=upper_positions))\n self._check_joint_limits()\n\n def test_joint_joint_limits_reaching(self):\n \"\"\" Perform all reaching tests.\n \"\"\"\n for name in self.joint_names:\n self._joint_limit_reaching_test(name)\n\n def test_joint_joint_limits_overstepping(self):\n \"\"\" Perform all overstepping tests.\n \"\"\"\n for name in self.joint_names:\n self._joint_limit_overstepping_test(name)\n\n\nif __name__ == '__main__':\n import rostest\n rospy.init_node('test_joint_position_limits')\n rostest.rosrun('pilz_robot_programming', 'test_joint_position_limits', TestJointPositionLimits)\n","repo_name":"PilzDE/pilz_industrial_motion","sub_path":"pilz_robot_programming/test/integrationtests/tst_joint_position_limits.py","file_name":"tst_joint_position_limits.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"3"} +{"seq_id":"75044014480","text":"import numpy as np\nimport copy\n\ndef state2index_2(length, L, O):\n if O == 'B':\n o_index = 0\n if O == 'S':\n o_index = 1\n if O == 'I':\n o_index = 2\n if O == 'F':\n o_index = 3\n if sum(L) == 0:\n return o_index\n for i in range(length):\n if L[i] != 0:\n return 1 * 4 + o_index\n return 2 * 4 + o_index\n\nclass 
TSRA_AGENT():\n def __init__(self, D, arrival_rate, learning_rate, length=1):\n self.D = D\n self.arrival_rate = arrival_rate\n self.epsilon = 1\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = learning_rate\n self.queue = [0] * self.D\n self.state = []\n self.length = length\n self.Q_table = np.zeros(shape=(3 * 4, 2), dtype=float)\n self.rho = 0\n\n self.initialize()\n\n def initialize(self):\n # initailize queue\n for i in range(self.D):\n if self.arrival_rate > np.random.uniform():\n self.queue[i] = 1\n # initailize action\n if 0.5 > np.random.uniform() and sum(self.queue) != 0:\n self.action = 1\n else:\n self.action = 0\n self.state.append('B')\n self.state.append(copy.deepcopy(self.queue))\n\n def update_Q_table(self, observation):\n self.state = self.state[-4:]\n index_1 = state2index_2(length=self.length, L=self.state[-3], O=self.state[-4])\n index_2 = state2index_2(length=self.length, L=self.state[-1], O=self.state[-2])\n if observation == 'B' or observation == 'S':\n reward = 10\n elif observation == 'F' and self.action == 1: # collsion or channel\n reward = -5\n elif observation == 'I' and self.queue[0] == 1: # need to transmit\n reward = -3\n else: # 'F' and action = 0, 'I' and no packet\n reward = 2\n self.Q_table[index_1][self.action] += self.learning_rate \\\n * (reward + np.max(self.Q_table[index_2]) - self.Q_table[index_1][self.action] - self.rho)\n self.rho += self.learning_rate * (reward + np.max(self.Q_table[index_2]) - self.Q_table[index_1][self.action] - self.rho)\n\n def update_queue(self, observation):\n self.state.append(observation)\n if observation == 'S': # transmit a packet\n self.queue[self.queue.index(1)] = 0\n self.queue[:-1] = self.queue[1:]\n if self.arrival_rate > np.random.uniform():\n self.queue[-1] = 1\n else:\n self.queue[-1] = 0\n self.state.append(copy.deepcopy(self.queue))\n\n def select_action(self, observation, **kwargs):\n # 0 is wait, 1 is transmit\n ###########################################\n index = state2index_2(length=self.length, L=self.queue, O=observation)\n\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if self.epsilon > np.random.uniform():\n self.action = round(np.random.uniform())\n else:\n self.action = np.argmax(self.Q_table[index])\n if sum(self.queue) == 0:\n self.action = 0\n\n def update(self, observation, **kwargs):\n self.update_queue(observation)\n self.update_Q_table(observation)\n self.select_action(observation, **kwargs)\n\n","repo_name":"DanzhouWu/TSRA","sub_path":"multi_device/TSRA/tsra_agent.py","file_name":"tsra_agent.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"6537534637","text":"import json\nimport os\nimport time\nfrom json.decoder import JSONDecodeError\nfrom typing import Any, Dict, Optional\n\nimport requests\nfrom requests.exceptions import HTTPError\n\nJson = Dict[str, Any]\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef get_temp_credentials(\n tutorialName: Optional[str] = None,\n credentialProvider: str = \"https://tutorials.arangodb.cloud:8529/_db/_system/tutorialDB/tutorialDB\",\n) -> Json:\n creds_file = f\"{dir_path}/data/creds.json\"\n\n try:\n with open(creds_file) as file:\n cache: Json = json.load(file)\n verify_url = f\"{cache['url']}/_db/{cache['dbName']}/_api/collection\"\n response = requests.get(verify_url, auth=(cache[\"username\"], cache[\"password\"]))\n response.raise_for_status()\n\n 
print(\"Success: reusing cached credentials\")\n return cache\n\n except (JSONDecodeError, HTTPError):\n print(\"Log: requesting new credentials...\")\n url = credentialProvider\n body = {\"tutorialName\": tutorialName} if tutorialName else \"{}\"\n response = requests.post(url, data=json.dumps(body))\n response.raise_for_status()\n\n data: Json = response.json()\n data[\"url\"] = f\"https://{data['hostname']}:{str(data['port'])}\"\n\n with open(creds_file, \"w+\") as outfile:\n json.dump(data, outfile)\n outfile.close()\n\n time.sleep(10) # Give instance enough time to provision\n\n print(\"Succcess: new credentials acquired\")\n return data\n","repo_name":"arangodb/adb-cloud-connector","sub_path":"adb_cloud_connector/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18343444207","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot5/blob/main/LICENSE\n\nimport os\n\nimport numpy as np\nimport pytest\nimport skhep_testdata\n\nimport uproot\n\n\ndef test():\n with uproot.open(skhep_testdata.data_path(\"uproot-mc10events.root\")) as file:\n tree = file[\"Events\"]\n assert tree[\"Muon\"].array(library=\"np\").tolist() == [\n 1,\n 0,\n 3,\n 3,\n 0,\n 1,\n 5,\n 0,\n 1,\n 0,\n ]\n","repo_name":"scikit-hep/uproot5","sub_path":"tests/test_0438_TClonesArray_is_not_AsGrouped.py","file_name":"test_0438_TClonesArray_is_not_AsGrouped.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"3"} +{"seq_id":"71231642003","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom time import sleep\r\n\r\n# Inicializar o webdriver\r\nwebdriver_instance = webdriver.Chrome()\r\nwebdriver_instance.implicitly_wait(10) # Aguardar até 10 segundos por elementos\r\n\r\n# Acessar a página de login\r\nwebdriver_instance.get('https://site/para/fazer/login')\r\nsleep(2)\r\n\r\n# Preencher o formulário de login\r\nusuario = webdriver_instance.find_element(By.NAME, 'j_username')\r\nusuario.send_keys('usuario-para-login')\r\nsenha = webdriver_instance.find_element(By.NAME, 'j_password')\r\nsenha.send_keys('senha-do-usuario')\r\nbutton_login = webdriver_instance.find_element(By.CSS_SELECTOR, 'input.btn.btn-lg.btn-primary.btn-block')\r\nbutton_login.click()\r\nsleep(2)\r\n\r\n# Ler os itens do arquivo input.txt e construir URLs\r\nwith open('input.txt', 'r') as file:\r\n items = file.read().splitlines()\r\n\r\nbase_url = 'https://base/da/url/do/site='\r\nfor item in items:\r\n target_url = base_url + item\r\n webdriver_instance.get(target_url)\r\n\r\n # Salvar o HTML da página em um arquivo de texto\r\n filename = f'html_{item}.txt'\r\n with open(filename, 'w', encoding='utf-8') as html_file:\r\n html_file.write(webdriver_instance.page_source)\r\n\r\n # Realize as operações necessárias na página\r\n # ...\r\n sleep(2)\r\n\r\n# Fechar o navegador ao final\r\nwebdriver_instance.quit()\r\n","repo_name":"Ageu48/AutomacaoSelenium-e-Python","sub_path":"buscador-de-dados-web/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6105603932","text":"import sys\nfrom PyQt5.QtWidgets import QPushButton\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as 
FigureCanvas\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nimport numpy as np\nfrom sklearn.neural_network import MLPClassifier\nimport pickle as cPickle\nimport csv\nimport os.path\n\nclass MainWindow(QMainWindow):\n\n    def __init__(self):\n        super().__init__()\n\n        self.setMinimumSize(QSize(800,600))\n        self.content = QWidget()\n        layout = QGridLayout()\n\n        self.figure = plt.figure()\n\n        self.canvas = FigureCanvas(self.figure)\n\n        self.button = QPushButton('Add_Point')\n        self.random_button = QPushButton('Predict_random_points')\n        self.teach_NN_button = QPushButton('Teach_NN')\n        self.load_csv_button = QPushButton('Load_csv')\n        self.show_boundaries_button = QPushButton('Show_boundaries')\n        self.clear_plot_button = QPushButton('Clear_plot')\n        self.save_NN_button = QPushButton('Save_NN')\n        self.load_NN_button = QPushButton('Load_NN')\n        self.scale_plot_button = QPushButton('Scale_plot')\n\n        layout.addWidget(self.button,2,0,1,2)\n        layout.addWidget(self.random_button, 3, 0, 1, 2)\n        layout.addWidget(self.show_boundaries_button,4, 0, 1, 2)\n        layout.addWidget(self.clear_plot_button, 5, 0, 1, 2)\n        layout.addWidget(self.load_csv_button, 14, 0, 1, 2)\n        layout.addWidget(self.teach_NN_button, 16, 0, 1, 2)\n        layout.addWidget(self.load_NN_button, 18, 0, 1, 2)\n        layout.addWidget(self.save_NN_button, 20, 0, 1, 2)\n        layout.addWidget(self.scale_plot_button,6 ,0 ,1 ,2)\n\n        self.X1_input = QLineEdit('Input from -4 to 2')\n        self.X2_input = QLineEdit('Input from 2 to 5')\n        self.save_name_input = QLineEdit(\"save_NN_name\")\n        self.load_name_input = QLineEdit(\"load_NN_name\")\n        self.NN_sample_size = QLineEdit(\"NN_sample_size\")\n        self.load_CSV_input = QLineEdit(\"load_CSV_file\")\n\n        layout.addWidget(QLabel('X1:'), 0, 0)\n        layout.addWidget(QLabel('X2:'), 1, 0)\n        layout.addWidget(QLabel('NN_save_name:'), 21, 0)\n        layout.addWidget(QLabel('NN_load_name:'), 19, 0)\n        layout.addWidget(QLabel('NN_Sample_size'), 17, 0)\n        layout.addWidget(QLabel('CSV_file_name'), 15, 0)\n        layout.addWidget(self.X1_input, 0, 1)\n        layout.addWidget(self.X2_input, 1, 1)\n        layout.addWidget(self.load_name_input, 19, 1)\n        layout.addWidget(self.save_name_input, 21, 1)\n        layout.addWidget(self.NN_sample_size, 17, 1)\n        layout.addWidget(self.load_CSV_input,15,1)\n\n        spacer1 = QSpacerItem(1, 1, QSizePolicy.Minimum, QSizePolicy.Expanding)\n        layout.addItem(spacer1, 9, 0, 1, 2)\n\n        self.button.clicked.connect(self.plot)\n        self.random_button.clicked.connect(self.predict_random_data)\n        self.load_csv_button.clicked.connect(self.load_csv)\n        self.show_boundaries_button.clicked.connect(self.show_boundaries)\n        self.clear_plot_button.clicked.connect(self.clear_plot)\n        self.save_NN_button.clicked.connect(self.save_NN)\n        self.load_NN_button.clicked.connect(self.load_NN)\n        self.teach_NN_button.clicked.connect(self.teach_NN)\n        self.scale_plot_button.clicked.connect(self.scale_plot)\n\n        self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n        layout.addWidget(self.canvas, 0, 2, 22, 1)\n\n        self.content.setLayout(layout)\n        self.setCentralWidget(self.content)\n\n    def plot(self):\n\n        try:\n            NN = clf\n\n            try:\n                X1 = float(self.X1_input.text())\n                X2 = float(self.X2_input.text())\n\n                if X1 < -4 or X1 > 2:\n                    QMessageBox.critical(self, 'Error', 'Wrong coefficients:\\nX1 out of (-4,2) interval')\n                elif X2 < 2 or X2 > 5:\n                    QMessageBox.critical(self, 'Error', 'Wrong coefficients:\\nX2 out of (2,5) interval')\n\n                else:\n                    prediction = clf.predict([[X1, X2]])\n                    color = \"r\"\n                    if prediction < 1:\n                        color = \"b\"\n                    plt.scatter(X1, X2, c=color, 
edgecolors='black')\n plt.ylim(2, 5)\n plt.xlim(-4, 2)\n\n self.set_plot()\n self.canvas.draw()\n\n except ValueError as e:\n QMessageBox.critical(self, 'Error', 'Wrong coefficients:\\nValueError: ' + str(e))\n\n except:\n QMessageBox.critical(self, 'Error', 'No NN loaded')\n\n def teach_NN(self):\n\n try:\n float(self.NN_sample_size.text())\n if float(self.NN_sample_size.text()) < 1 or float(self.NN_sample_size.text()) > 3500:\n QMessageBox.critical(self, 'Error', \"Sample size too big or negative\")\n\n else:\n inputs, outputs = self.generate_random(int(self.NN_sample_size.text()))\n global clf\n clf = MLPClassifier(solver='lbfgs', alpha=0.9, max_iter=500000, learning_rate_init=0.01,\n hidden_layer_sizes=(30, 25, 20, 20, 20), random_state=1)\n clf.fit(inputs, outputs)\n except ValueError as e:\n QMessageBox.critical(self, 'Error', 'Wrong coefficient:' + str(e))\n\n def generate_random(self, sample_num):\n\n X1 = np.array([])\n X2 = np.array([])\n Y_output = np.array([])\n data = np.array([])\n it = 0\n\n for i in range(sample_num):\n x1 = np.random.uniform(-4, 2)\n x2 = np.random.uniform(2, 5)\n\n X1 = np.append(X1, x1)\n X2 = np.append(X2, x2)\n\n Y = 0.4444444 * (X1 + 2) ** 2 + 2.3668639 * (X2 - 3) ** 2\n\n for i in Y:\n if i < 1:\n output = 1\n else:\n output = 0\n Y_output = np.append(Y_output, output)\n\n for i in X1:\n data = np.append(data, [X1[it], X2[it]])\n it = it + 1\n\n data = np.reshape(data, (-1, 2))\n\n return data, Y_output\n\n def predict_random_data(self):\n\n pred_data, Ys = self.generate_random(100)\n self.predict_data(pred_data)\n\n def predict_data(self, pred_data):\n\n try:\n\n prediction = clf.predict(pred_data)\n\n for i in range(len(pred_data)):\n point = pred_data[i]\n color = \"r\"\n if prediction[i] < 1:\n color = \"b\"\n plt.scatter(point[0], point[1], c=color, edgecolors='black')\n\n self.set_plot()\n self.canvas.draw()\n\n except:\n QMessageBox.critical(self, 'Error', 'No NN loaded')\n\n def load_csv(self):\n\n try:\n NN = clf\n\n if os.path.isfile(str(self.load_CSV_input.text() + \".csv\")):\n\n filename = str(self.load_CSV_input.text() + \".csv\")\n X1 = []\n X2 = []\n data = np.array([])\n\n with open(filename, 'r') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\";\")\n\n try:\n\n for row in csvreader:\n\n X1.append(float(row[0].replace(',', \".\")))\n X2.append(float(row[1].replace(',', \".\")))\n\n for i in range(len(X1)):\n data = np.append(data, [X1[i], X2[i]])\n data = np.reshape(data, (-1, 2))\n\n self.predict_data(data)\n output = clf.predict(data)\n self.save_csv(X1, X2, output)\n\n except:\n QMessageBox.critical(self, 'Error', 'CSV file error')\n\n else:\n QMessageBox.critical(self, 'Error', 'File not found')\n\n except:\n QMessageBox.critical(self, 'Error', 'No NN loaded')\n\n def save_csv(self,X1,X2,output):\n\n fields = [\"X1\",\"X2\",\"Prediction\"]\n filename = \"NN_prediction.csv\"\n\n with open(filename, \"w\") as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter=\";\")\n csvwriter.writerow(fields)\n\n for i in range(len(X1)):\n csvwriter.writerow([str(X1[i]), str(X2[i]), str(output[i])])\n\n def show_boundaries(self):\n\n try:\n\n h = .01\n xx, yy = np.meshgrid(np.arange(-4, 2, h),\n np.arange(2, 5, h))\n\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plt.contourf(xx, yy, Z, cmap=plt.cm.jet, alpha=0.5)\n\n self.set_plot()\n self.canvas.draw()\n\n except:\n QMessageBox.critical(self, 'Error', 'No NN loaded')\n\n def clear_plot(self):\n\n plt.clf()\n self.set_plot()\n self.canvas.draw()\n\n 
def set_plot(self):\n\n        plt.ylim(2, 5)\n        plt.xlim(-4, 2)\n        plt.xlabel(\"X1\")\n        plt.ylabel(\"X2\")\n        blue = mlines.Line2D([], [], color='blue', marker='o', linestyle='None',\n                          markersize=10, label='Outside')\n        red = mlines.Line2D([], [], color='red', marker='o', linestyle='None',\n                          markersize=10, label='Inside')\n        plt.legend(handles=[red,blue], loc = \"upper right\")\n\n    def save_NN(self):\n\n        try:\n\n            NN = clf\n            with open(str(self.save_name_input.text()) + \".pkl\", 'wb') as fid:\n                cPickle.dump(clf, fid)\n\n        except:\n            QMessageBox.critical(self, 'Error', 'No NN loaded')\n\n    def load_NN(self):\n\n        try:\n\n            global clf\n            with open(str(self.load_name_input.text()) + \".pkl\", 'rb') as fid:\n                clf = cPickle.load(fid)\n\n        except:\n            QMessageBox.critical(self, 'Error', 'File not found')\n\n    def scale_plot(self):\n        plt.axis(\"scaled\")\n        self.set_plot()\n        self.canvas.draw()\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    main = MainWindow()\n    main.show()\n    sys.exit(app.exec_())\n","repo_name":"jmachala/Projects","sub_path":"ANN_classification_JM.py","file_name":"ANN_classification_JM.py","file_ext":"py","file_size_in_byte":10256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71220178643","text":"# pyuic5 -o gui.py untitled.ui\nimport cv2, sys, yaml, os, torch, time\nimport threading\nimport numpy as np\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom gui import Ui_Dialog\nfrom models.fastsegfomer.fastsegformer import FastSegFormer\nfrom utils.utils import *\nimport onnxruntime\n\n\n\n\ndef resize_img(img, img_size=600, value=[255, 255, 255], inter=cv2.INTER_AREA):\n    old_shape = img.shape[:2]\n    ratio = img_size / max(old_shape)\n    new_shape = [int(s * ratio) for s in old_shape[:2]]\n    img = cv2.resize(img, (new_shape[1], new_shape[0]), interpolation=inter)\n    delta_h, delta_w = img_size - new_shape[0], img_size - new_shape[1]\n    top, bottom = delta_h // 2, delta_h - delta_h // 2\n    left, right = delta_w // 2, delta_w - delta_w // 2\n    img = cv2.copyMakeBorder(img, int(top), int(bottom), int(left), int(right), borderType=cv2.BORDER_CONSTANT,\n                             value=value)\n    return img\n\n\nclass MyForm(QDialog):\n    \n    # custom signal, emitted whenever the text browser needs updating\n    text_update_signal = QtCore.pyqtSignal(str)\n    \n    def __init__(self, title, textBrowser_size):\n        super().__init__()\n        self.ui = Ui_Dialog()\n        self.ui.setupUi(self)\n        \n        self.save_path = 'result'\n        self.save_id = 0\n        if not os.path.exists(self.save_path):\n            os.makedirs(self.save_path, exist_ok=True)\n        self.now = None\n        self.model = None\n        self.video_count = None\n        self._timer = None\n        self.out = None\n        \n        self.ui.textBrowser.setFontPointSize(textBrowser_size)\n        self.ui.label.setText(title)\n        \n        self.ui.pushButton_Model.clicked.connect(self.select_model)\n        self.ui.pushButton_Img.clicked.connect(self.select_image_file)\n        self.ui.pushButton_ImgFolder.clicked.connect(self.select_folder_file)\n        self.ui.pushButton_Video.clicked.connect(self.select_video_file)\n        self.ui.pushButton_Camera.clicked.connect(self.select_camera)\n        self.ui.pushButton_BegDet.clicked.connect(self.begin_detect)\n        self.ui.pushButton_Exit.clicked.connect(self._exit)\n        self.ui.pushButton_SavePath.clicked.connect(self.select_savepath)\n        self.ui.pushButton_StopDet.clicked.connect(self.stop_detect)\n        self.ui.comboBox.currentIndexChanged.connect(self.comboBox_vis)\n        self.show()\n        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        self.cfg = None\n        self.weight_type = None\n        self.info 
= 'red: sunburn; green: ulcer; orange: wind scarring'\n self.text_update_signal.connect(self.update_text) # 将信号与槽函数关联\n self.fps_all = 0\n self.frame_all = 0\n \n def update_text(self, message):\n self.ui.textBrowser.append(message) # 更新文本\n\n def read_and_show_image_from_path(self, image_path):\n image = cv2.imdecode(np.fromfile(image_path, np.uint8), cv2.IMREAD_COLOR)\n resize_image = cv2.cvtColor(resize_img(image), cv2.COLOR_RGB2BGR)\n self.ui.label_ori.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(resize_image.data, resize_image.shape[1], resize_image.shape[0], QtGui.QImage.Format_RGB888)))\n return image\n \n def show_image_from_array(self, image, ori=False, det=False):\n # QT的setPixmap读取的为BGR格式\n resize_image = cv2.cvtColor(resize_img(image), cv2.COLOR_RGB2BGR)\n if ori:\n self.ui.label_ori.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(resize_image.data, resize_image.shape[1], resize_image.shape[0], QtGui.QImage.Format_RGB888)))\n if det:\n self.ui.label_det.setPixmap(QtGui.QPixmap.fromImage(QtGui.QImage(resize_image.data, resize_image.shape[1], resize_image.shape[0], QtGui.QImage.Format_RGB888)))\n \n def show_message(self, message):\n QMessageBox.information(self, \"提示\", message, QMessageBox.Ok)\n \n def reset_timer(self):\n self._timer.stop()\n self._timer = None\n \n def reset_video_count(self):\n if self.video_count is not None:\n self.video_count = None\n \n def reset_det_label(self):\n self.ui.label_det.setText('')\n \n def comboBox_vis(self):\n self.ui.textBrowser.append(f'track state change to {self.ui.comboBox.currentText()}')\n self.track_init()\n \n def track_init(self):\n if self.ui.comboBox.currentText() != 'NoTrack':\n self.model.track_init(self.ui.comboBox.currentText())\n \n def select_model(self):\n fileName, fileType = QFileDialog.getOpenFileName(self, '选取文件', '.', 'YAML (*.yaml)')\n if fileName != '':\n self.ui.textBrowser.append(f'load yaml form {fileName}.')\n # read cfg\n with open(fileName) as f:\n self.cfg = yaml.load(f, Loader=yaml.SafeLoader)\n # init FastSegFormer-p model\n if self.cfg['model_type'] == 'FastSegFormer_P':\n self.model = FastSegFormer(num_classes=self.cfg['num_classes'], pretrained=False, backbone='poolformer_s12', Pyramid='multiscale', cnn_branch=True).to(self.device).eval()\n if self.cfg['model_path'].endswith('pth'):\n self.weight_type = 'pth'\n checkpoint = torch.load(self.cfg['model_path'], map_location=self.device)\n self.model.load_state_dict(checkpoint)\n elif self.cfg['model_path'].endswith('onnx'):\n self.weight_type = 'onnx'\n # providers = ['CPUExecutionProvider']\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']\n self.model = onnxruntime.InferenceSession(self.cfg['model_path'], providers=providers)\n else:\n self.ui.textBrowser.append(f'load yaml failure.')\n self.ui.textBrowser.append(f'load yaml success.')\n else:\n self.ui.textBrowser.append(f'load yaml failure.')\n self.show_message('请选择yaml配置文件.')\n \n def select_image_file(self):\n fileName, fileType = QFileDialog.getOpenFileName(self, '选取文件', '.', 'JPG (*.jpg);;PNG (*.png)')\n if fileName != '':\n self.reset_det_label()\n image = self.read_and_show_image_from_path(fileName)\n self.now = image\n self.ui.textBrowser.append(f'read image form {fileName}')\n else:\n self.show_message('请选择图片文件.')\n\n def select_folder_file(self):\n folder = QFileDialog.getExistingDirectory(self, '选择路径', '.')\n if folder != '':\n folder_list = [os.path.join(folder, i) for i in os.listdir(folder)]\n if len(folder_list) == 0:\n self.show_message('选择的文件夹内容为空.')\n else:\n 
self.reset_det_label()\n self.now = folder_list\n self.read_and_show_image_from_path(folder_list[0])\n self.ui.textBrowser.append(f'read folder form {folder}')\n else:\n self.show_message('请选择图片文件夹.')\n \n def select_video_file(self):\n fileName, fileType = QFileDialog.getOpenFileName(self, '选取文件', '.', 'MP4 (*.mp4)')\n cap = cv2.VideoCapture(fileName)\n \n if self._timer is not None:\n self.reset_timer()\n \n if not cap.isOpened():\n self.show_message('MP4视频打开失败!')\n else:\n self.reset_det_label()\n flag, image = cap.read()\n self.show_image_from_array(image, ori=True)\n self.now = cap\n self.video_count = int(self.now.get(cv2.CAP_PROP_FRAME_COUNT))\n self.print_id = 1\n\n def select_camera(self):\n cap = cv2.VideoCapture(0)\n \n if self._timer is not None:\n self.reset_timer()\n \n if not cap.isOpened():\n self.show_message('视频打开失败.')\n else:\n self.reset_det_label()\n flag, image = cap.read()\n self.show_image_from_array(image, ori=True)\n self.now = cap\n self.print_id = 1\n \n def begin_detect(self):\n if self.model is None:\n self.show_message('请先选择模型yaml配置文件.')\n \n if self._timer is not None:\n self.reset_timer()\n \n if type(self.now) is cv2.VideoCapture:\n \n \"\"\"\n 处理视频流\n \"\"\"\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n size = (int(self.now.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.now.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n if self.out is None:\n self.out = cv2.VideoWriter(os.path.join(self.save_path, f'{self.save_id}.mp4'), fourcc, 30.0, size)\n \n self.track_init()\n self._timer = QTimer(self)\n # 启动新的线程来处理视频帧\n video_thread = threading.Thread(target=self.show_video)\n video_thread.start()\n # self._timer.timeout.connect(self.show_video)\n # self._timer.start(20)\n elif type(self.now) is list:\n \"\"\"\n 处理列表图像\n \"\"\"\n self.print_id, self.folder_len = 1, len(self.now)\n self._timer = QTimer(self)\n self._timer.timeout.connect(self.show_folder)\n self._timer.start(20)\n else:\n \"\"\"\n 处理单张图像\n \"\"\"\n torch.cuda.synchronize()\n since = time.time()\n result, image_det = detect_image(model=self.model, image=self.now, name_classes=self.cfg['name_classes'], num_classes=self.cfg['num_classes'], input_shape=self.cfg['input_shape'], device=self.device, weight_type=self.weight_type)\n torch.cuda.synchronize()\n end = time.time()\n cv2.imencode(\".jpg\", image_det)[1].tofile(os.path.join(self.save_path, f'{self.save_id}.jpg'))\n self.ui.textBrowser.append(f'time:{end-since:.5f}s save image in {os.path.join(self.save_path, f\"{self.save_id}.jpg\")}\\n' + self.info)\n self.save_id += 1\n self.show_image_from_array(image_det, det=True)\n \n def stop_detect(self):\n if self._timer is not None:\n self.reset_timer()\n \n def select_savepath(self):\n folder = QFileDialog.getExistingDirectory(self, '选择路径', '.')\n self.save_path = folder\n self.save_id = 0\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path, exist_ok=True)\n \n def show_folder(self):\n if len(self.now) == 0:\n self.reset_timer()\n else:\n img_path = self.now[0]\n image = self.read_and_show_image_from_path(img_path)\n torch.cuda.synchronize()\n since = time.time()\n result, image_det = detect_image(model=self.model, image=image, name_classes=self.cfg['name_classes'], num_classes=self.cfg['num_classes'], input_shape=self.cfg['input_shape'], device=self.device, weight_type=self.weight_type)\n torch.cuda.synchronize()\n end = time.time()\n cv2.imencode(\".jpg\", image_det)[1].tofile(os.path.join(self.save_path, f'{self.save_id}.jpg'))\n # self.ui.textBrowser.append(f'time:{end-since:.5f}s 
{self.print_id}/{self.folder_len} save image in {os.path.join(self.save_path, f\"{self.save_id}.jpg\")}')\n self.text_update_signal.emit(f'time:{end-since:.5f}s {self.print_id}/{self.folder_len} save image in {os.path.join(self.save_path, f\"{self.save_id}.jpg\")}')\n self.show_image_from_array(image_det, det=True)\n self.print_id += 1\n self.save_id += 1\n self.now.pop(0)\n \n \n def show_video(self):\n while self.now is not None:\n flag, image = self.now.read()\n if flag:\n self.frame_all += 1\n self.show_image_from_array(image, ori=True)\n torch.cuda.synchronize()\n since = time.time()\n seg_img, image_det = detect_image(model=self.model, image=image, name_classes=self.cfg['name_classes'], num_classes=self.cfg['num_classes'], input_shape=self.cfg['input_shape'], device=self.device, weight_type=self.weight_type)\n if self.ui.comboBox.currentText() != 'NoTrack':\n image_det = self.model.track_processing(image.copy(), seg_img)\n torch.cuda.synchronize()\n end = time.time()\n self.out.write(image_det)\n self.show_image_from_array(image_det, det=True)\n fps = 1/(end-since)\n self.fps_all += fps\n if self.video_count is not None:\n self.text_update_signal.emit(f'{self.print_id}/{self.video_count} Frames. time:{end-since:.5f}s fps:{1 / (end-since):.3f}' + self.info)\n # self.ui.textBrowser.append(f'{self.print_id}/{self.video_count} Frames. time:{end-since:.5f}s fps:{1 / (end-since):.3f}' + self.info)\n else:\n self.text_update_signal.emit(f'{self.print_id} Frames. time:{end-since:.5f}s fps:{1 / (end-since):.3f}' + self.info)\n # self.ui.textBrowser.append(f'{self.print_id} Frames. time:{end-since:.5f}s fps:{1 / (end-since):.3f}' + self.info)\n self.print_id += 1\n \n else:\n print(f'Average fps:{self.fps_all/self.frame_all:.3f}')\n self.now = None\n self.reset_timer()\n self.out.release()\n self.out = None\n self.reset_video_count()\n self.save_id += 1\n \n\n def _exit(self):\n self.close()\n\nif __name__ == '__main__':\n gui_title = 'FastSegFormer-VisionSystem'\n textBrowser_size = 15\n \n app = QApplication(sys.argv)\n w = MyForm(title=gui_title, textBrowser_size=textBrowser_size)\n sys.exit(app.exec_())","repo_name":"caixiongjiang/FastSegFormer-pyqt","sub_path":"run_gui.py","file_name":"run_gui.py","file_ext":"py","file_size_in_byte":14059,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"43598651699","text":"import sys, os\nimport argparse\n\nimport socketio\nimport defaults\nimport log\nlogger = log.get_module_logger(__name__)\n\nclass ArgumentParser(argparse.ArgumentParser):\n def error(self, message):\n \"\"\"Redefinition to print full help message on any errors\"\"\"\n self.print_help(sys.stderr)\n self.exit(2, '%s: error: %s\\n' % (self.prog, message))\n\nclass DNSLookupAction(argparse.Action):\n \"\"\"perform DNS query for each host passed to arg\"\"\"\n def __call__(self, parser, args, values, option_string=None):\n hosts = []\n for v in values:\n hosts += socketio.get_hosts_by_dns(v)\n setattr(args, self.dest, hosts)\n\ndef register_computeaddress_args(parser):\n parser.add_argument('--computehosts', '--ch', type=str, nargs='+',\n default=defaults._cs_hosts,\n help='list of compute server addresses')\n parser.add_argument('--computeport', '--cp', type=int,\n default=defaults._cs_port,\n help='Compute server port')\n\ndef register_db_args(parser):\n parser.add_argument('--dbhost', type=str, help='define the host for mongodb connections',\n default=defaults.db_address[0])\n parser.add_argument('--dbport', type=int, 
help='define the port for mongodb connections',\n default=defaults.db_address[1])\n parser.add_argument('--dbname', type=str, help='define the database to use',\n default=defaults.db_name)\n parser.add_argument('--dbauth', type=str, nargs=2, metavar=('user', 'pass'), help='database authentication',\n default=defaults.db_auth)\n","repo_name":"qihuilyu/P2T","sub_path":"MC simulation/dosecalc/webapi/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"33318641925","text":"import sqlite3\nfrom datetime import datetime, timedelta\n\nclass Database:\n def __init__(self, db_name='my_database.db'):\n self.connection = sqlite3.connect(db_name)\n self.cursor = self.connection.cursor()\n self.create_table()\n\n def create_table(self):\n self.cursor.execute('''\n CREATE TABLE IF NOT EXISTS records (\n id INTEGER PRIMARY KEY,\n date TEXT,\n from_time TEXT,\n to_time TEXT,\n task TEXT,\n tag TEXT\n )\n ''')\n self.connection.commit()\n\n def record_data(self, date, from_time, to_time, task, tag):\n self.cursor.execute('''\n INSERT INTO records (date, from_time, to_time, task, tag)\n VALUES (?, ?, ?, ?, ?)\n ''', (date, from_time, to_time, task, tag))\n self.connection.commit()\n\n def query_data(self, query_object):\n return query_object.execute(self)\n\n def close_connection(self):\n self.connection.close()\n\nclass Record:\n def __init__(self, date, from_time, to_time, task, tag):\n self.date = self.convert_date(date)\n self.from_time = from_time\n self.to_time = to_time\n self.task = task\n self.tag = tag\n\n def convert_date(self, date):\n if date.lower() == 'today':\n return datetime.now().strftime('%Y/%m/%d')\n elif date.lower() == 'yesterday':\n yesterday = datetime.now() - timedelta(days=1)\n return yesterday.strftime('%Y/%m/%d')\n else:\n return date\n\nclass QueryTask:\n def __init__(self, task):\n self.task = task\n\n def execute(self, database):\n database.cursor.execute('''\n SELECT * FROM records WHERE task = ?\n ''', (self.task,))\n return database.cursor.fetchall()\n\nclass QueryTag:\n def __init__(self, tag):\n self.tag = tag\n\n def execute(self, database):\n database.cursor.execute('''\n SELECT * FROM records WHERE tag = ?\n ''', (self.tag,))\n return database.cursor.fetchall()\n\nclass QueryDate:\n def __init__(self, date):\n self.date = self.convert_date(date)\n\n def convert_date(self, date):\n if date.lower() == 'today':\n return datetime.now().strftime('%Y/%m/%d')\n elif date.lower() == 'yesterday':\n yesterday = datetime.now() - timedelta(days=1)\n return yesterday.strftime('%Y/%m/%d')\n else:\n return date\n\n def execute(self, database):\n database.cursor.execute('''\n SELECT * FROM records WHERE date = ?\n ''', (self.date,))\n return database.cursor.fetchall()\n\nclass CommandLoop:\n def __init__(self, main_app):\n self.main_app = main_app\n\n def start(self):\n while True:\n command = input(\"Enter command: \")\n if command.lower() == 'exit':\n self.main_app.close()\n break\n else:\n self.main_app.process_command(command)\n\nclass QueryHandler:\n def query_task(self, task, database):\n query_object = QueryTask(task)\n result = query_object.execute(database)\n\n if result:\n print(\"Matching records for task {}: \".format(task))\n for record in result:\n print(record)\n else:\n print(\"No records found for the given task.\")\n\n def query_tag(self, tag, database):\n query_object = QueryTag(tag)\n result = query_object.execute(database)\n\n if 
result:\n print(\"Matching records for tag {}: \".format(tag))\n for record in result:\n print(record)\n else:\n print(\"No records found for the given tag.\")\n\n def query_date(self, date, database):\n query_object = QueryDate(date)\n result = query_object.execute(database)\n\n if result:\n print(\"Matching records for date {}: \".format(date))\n for record in result:\n print(record)\n else:\n print(\"No records found for the given date.\")\n\nclass MainApp:\n def __init__(self):\n self.db = Database()\n self.query_handler = QueryHandler()\n\n def process_command(self, command):\n parts = command.split()\n operation = parts[0].lower()\n\n if operation == 'record':\n self.record(parts[1:])\n elif operation == 'querytask':\n self.query_handler.query_task(parts[1], self.db)\n elif operation == 'querytag':\n self.query_handler.query_tag(parts[1], self.db)\n elif operation == 'querydate':\n self.query_handler.query_date(parts[1], self.db)\n else:\n print(\"Invalid command. Supported commands: record, querytask, querytag, querydate\")\n\n def record(self, data):\n if len(data) != 5:\n print(\"Invalid record command. Use: record DATE FROMTIME TOTIME TASK TAG\")\n return\n\n record_object = Record(*data)\n self.db.record_data(record_object.date, record_object.from_time, record_object.to_time, record_object.task, record_object.tag)\n print(\"Record added successfully.\")\n\n def close(self):\n self.db.close_connection()\n\nif __name__ == \"__main__\":\n main_app = MainApp()\n command_loop = CommandLoop(main_app)\n command_loop.start()\n","repo_name":"lohnerj/ase420individual","sub_path":"Artifacts/v1/mainv1.py","file_name":"mainv1.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24646100684","text":"import os as oo \nimport cx_Oracle\nimport getpass\n\n#Utilização da biblioteca getpass para mascarar da senha da conexão\ndef masked_input(prompt=''):\n password=[]\n while True:\n key= getpass.getpass(prompt='',stream=None)\n if not key:\n break\n password.append(key)\n print('*'*len(key),end='',flush=True)\n print()\n return ''.join(password)\n\n# Inserção de dados para conexão com banco de dados\nhost = input(\"Informe o nome do host: \")\nport = input(\"Informe o número da porta: \")\nservice_name = input(\"Informe o nome do serviço do banco de dados: \")\nusername = input(\"Informe o nome do usuário: \")\nprint(\"\\nInforme a senha do usuário:\")\npassword = masked_input(\"Informe a senha do usuário: \")\n\n# Cria uma conexão com o banco de dados\ndsn_tns = cx_Oracle.makedsn(host, port, service_name=service_name)\nconn = cx_Oracle.connect(username, password, dsn_tns)\n\ncur = conn.cursor()\n\n# Criação da tabela em background para inserção dos parâmetros\nsqlParam= 'create table qualidade_ar (mp10 integer, mp25 integer, o3 integer, co integer, no2 integer, so2 integer)'\n\ncur.execute(sqlParam)\n\nconn.commit()\n\noo.system('cls')\n\nflag=True\n\n# Definição do Menu principal\n\nwhile flag==True:\n print(\"\\nBem vindo ao sistema! Selecione uma opção para continuar: \")\n print(\"\\n Opção 1. Inserir Amostras\")\n print(\"\\n Opção 2. Alterar Amostras\")\n print(\"\\n Opção 3. Apagar Amostras\")\n print(\"\\n Opção 4. Classificar Amostras\")\n print(\"\\n Opção 0. 
Sair do Sistema\")\n\n opt=(int)(input(\"Selecione uma opção para continuar: \"))\n\n while opt!=0:\n # Inserção dos valores de uma amostra\n if opt==1:\n oo.system('cls')\n print(\"\\nPor favor, insira as amostras:\")\n mp10 = float(input(\"Informe o valor de MP10: \"))\n mp25 = float(input(\"Informe o valor de MP2,5: \"))\n o3 = float(input(\"Informe o valor de O3: \"))\n co = float(input(\"Informe o valor de CO: \"))\n no2 = float(input(\"Informe o valor de NO2: \"))\n so2 = float(input(\"Informe o valor de SO2: \"))\n \n sqlInsert = 'insert into qualidade_ar (mp10, mp25, o3, co, no2, so2) VALUES (:mp10, :mp25, :o3, :co, :no2, :so2)'\n \n dados= {'mp10': mp10, 'mp25':mp25, 'o3':o3, 'co':co, 'no2':no2, 'so2':so2}\n \n cur.execute(sqlInsert,dados)\n \n conn.commit()\n \n break\n\n \n if opt==2:\n oo.system('cls')\n # Atualização dos valores de uma amostra\n print(\"\\n\")\n sqlSelect= 'select mp10, mp25, o3, co, no2, so2 from qualidade_ar'\n cur.execute(sqlSelect)\n \n nomeC = [desc[0] for desc in cur.description]\n print(\"\\n\",nomeC)\n\n # Mostra o valor dos dados\n rows = cur.fetchall()\n\n for row in rows:\n print(\"\\n\",row)\n\n conn.commit()\n print(\"\\n\")\n # Usuário escolhe a coluna que deseja alterar por rótulo e valor anterior\n columnname=str(input(\"Qual coluna deseja alterar?: \"))\n \n if columnname==\"mp10\": \n mp10 = float(input(\"Informe o novo valor de MP10: \"))\n mp10old=float(input(\"Infrome o antigo valor de MP10: \"))\n\n sqlupdate0=\"update qualidade_ar set mp10 = :mp10new where mp10 = :mp10old\"\n data0={'mp10new':mp10, 'mp10old':mp10old}\n cur.execute(sqlupdate0,data0)\n conn.commit()\n\n print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n \n if columnname==\"mp25\":\n mp25 = float(input(\"Informe o novo valor de MP2,5: \"))\n mp25old=float(input(\"Informe o valor antigo de mp2,5: \"))\n sqlupdate1=\"update qualidade_ar set mp25 = :mp25new where mp25 = :mp25old\"\n data1={'mp25new':mp25, 'mp25old':mp25old}\n cur.execute(sqlupdate1,data1)\n conn.commit()\n\n print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n\n if columnname==\"o3\":\n o3 = float(input(\"Informe o novo valor de O3: \"))\n o3old=float(input(\"Informe o valor antigo de O3: \"))\n sqlupdate2=\"update qualidade_ar set o3 = :o3new where o3 = :o3old\"\n data2={'o3new':o3, 'o3old':o3old}\n cur.execute(sqlupdate2,data2)\n conn.commit()\n\n print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n\n if columnname==\"co\":\n co = float(input(\"Informe o novo valor de CO: \"))\n coold=float(input(\"Informe o valor antigo de CO: \"))\n sqlupdate3=\"update qualidade_ar set co = :conew where co = :coold\"\n data3={'conew':co, 'coold':coold}\n cur.execute(sqlupdate3,data3)\n conn.commit()\n\n print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n\n if columnname==\"no2\":\n no2 = float(input(\"Informe o novo valor de NO2: \"))\n no2old= float(input(\"Informe o valor antigo de NO2: \"))\n sqlupdate4=\"update qualidade_ar set no2 = :no2new where no2 = :no2old\"\n data4={'no2new':no2, 'no2old':no2old}\n cur.execute(sqlupdate4,data4)\n conn.commit()\n\n print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n\n if columnname==\"so2\":\n so2 = float(input(\"Informe o novo valor de SO2: \"))\n so2old= float(input(\"Informe o valor antigo de SO2: \"))\n sqlupdate5=\"update qualidade_ar set so2 = :so2new where so2 = :so2old\"\n data5={'so2new':so2, 'so2old':so2old}\n cur.execute(sqlupdate5,data5)\n conn.commit()\n \n 
print(\"\\nValores da amostra atualizados!\")\n print(\"\\n\")\n break\n \n if opt==3:\n oo.system('cls')\n print(\"\\n\")\n # Atualização dos valores de uma amostra\n print(\"\\n\")\n sqlSelect= 'select mp10, mp25, o3, co, no2, so2 from qualidade_ar'\n cur.execute(sqlSelect)\n \n nomeC = [desc[0] for desc in cur.description]\n print(\"\\n\",nomeC)\n\n # Mostra o valor dos dados\n rows = cur.fetchall()\n\n for row in rows:\n print(\"\\n\",row)\n\n conn.commit()\n \n print(\"\\n\")\n sampledelete=str(input(\"Qual amostra deseja apagar?: \"))\n dataname=float(input(\"\\nQual dado da coluna deseja apagar?: \"))\n if sampledelete==(\"mp10\"):\n sqldeletemp10='update qualidade_ar set mp10 = 0 where mp10 = :mp10delete'\n mp10deletecomm={'mp10delete':dataname}\n cur.execute(sqldeletemp10,mp10deletecomm)\n conn.commit()\n\n elif sampledelete==(\"mp25\"):\n sqldeletemp25='update qualidade_ar set mp25 = 0 where mp25 = :mp25delete'\n mp25deletecomm={'mp25delete':dataname}\n cur.execute(sqldeletemp25,mp25deletecomm)\n conn.commit()\n\n elif sampledelete==(\"o3\"):\n sqldeleteo3='update qualidade_ar set o3 = 0 where o3 = :o3delete'\n o3deletecomm={'o3delete':dataname}\n cur.execute(sqldeleteo3,o3deletecomm)\n conn.commit()\n\n elif sampledelete==(\"co\"):\n sqldeleteco='update qualidade_ar set co = 0 where co = :codelete'\n codeletecomm={'codelete':dataname}\n cur.execute(sqldeleteco,codeletecomm)\n conn.commit()\n\n elif sampledelete==(\"no2\"):\n sqldeleteno2='update qualidade_ar set no2 = 0 where no2 = :no2delete'\n no2deletecomm={'no2delete':no2deletecomm}\n cur.execute(sqldeleteno2,no2deletecomm)\n conn.commit()\n\n elif sampledelete==(\"so2\"):\n sqldeleteso2='update qualidade_ar set so2 = 0 where so2 = :so2delete'\n so2deletecomm={'so2delete':so2deletecomm}\n cur.execute(sqldeleteso2,so2deletecomm)\n conn.commit()\n print(\"\\nAmostra apagada!\")\n print(\"\\n\")\n break\n\n if opt==4:\n oo.system('cls')\n print(\"\\n\")\n\n sqlSelect2= 'select mp10, mp25, o3, co, no2, so2 from qualidade_ar'\n cur.execute(sqlSelect2)\n \n nomeC2 = [desc[0] for desc in cur.description]\n print(\"\\n\",nomeC2)\n\n# Mostra o valor dos dados\n rows = cur.fetchall()\n\n for row in rows:\n print(\"\\n\",row)\n \n# Calcula a media de cada coluna\n for i in range(len(nomeC2)):\n cur.execute(f\"SELECT AVG({nomeC2[i]}) FROM qualidade_ar\")\n med = cur.fetchone()[0]\n print(f\"[\\nMédia de {nomeC2[i]}: {med}\")\n\n conn.commit()\n\n # Leitura dos valores das amostras (Entradas)\n cur.execute('select avg(mp10) from qualidade_ar')\n mp10 = cur.fetchone()[0]\n\n cur.execute('select avg(mp25) from qualidade_ar')\n mp25 = cur.fetchone()[0]\n\n cur.execute('select avg(o3) from qualidade_ar')\n o3 = cur.fetchone()[0]\n\n cur.execute('select avg(co) from qualidade_ar')\n co = cur.fetchone()[0]\n\n cur.execute('select avg(no2) from qualidade_ar')\n no2 = cur.fetchone()[0]\n\n cur.execute('select avg(so2) from qualidade_ar')\n so2 = cur.fetchone()[0]\n\n \n if (mp10 < 0) and (mp25 <= 0) and (o3 <= 0) and (co <= 0) and (no2 <= 0) and (so2 <= 0):\n \n print(\"Valores inválidos!\")\n \n\n # Verificação da qualidade do ar com base nos valores informados\n # Exemplo: Se todos os valores da linha estiverem de acordo com a classificação de boa então imprime \"Qualidade do ar: Boa\"!\n # Porém se algum valor estourar o limite, passa para a qualidade seguinte adequada.\n if (0 <= mp10 <= 50) and (0 <= mp25 <= 25) and (0<= o3 <= 100) and (0<= co <= 9) and (0<= no2 <= 200) and (0<= so2 <= 20):\n \n print(\"\\nQualidade do ar: Boa\")\n print 
(\"\\n\\n\")\n \n\n elif (50 < mp10 <= 100) or (25 < mp25 <= 50) or (100 < o3 <= 130) or (9 < co <= 11) or (200 < no2 <= 240) or (20 < so2 <= 40):\n \n print(\"\\nQualidade do ar: Moderada\")\n print (\"\\n\")\n print (\"Pessoas de grupos sensíveis(crianças, idosos e pessoas com doenças respiratórias e cardíacas) podem apresentar sintomas como tosse seca e cansaço. A populaçao em geral nao é afetada\")\n print (\"\\n\")\n \n \n elif (100 < mp10 <= 150) or (50 < mp25 <= 75) or (130 < o3 <= 160) or (11 < co <= 13) or (240 < no2 <= 320) or (40 < so2 <= 365):\n \n print(\"\\nQualidade do ar: Ruim\")\n print (\"\\n\\n\")\n print (\"Toda a população pode apresentar sintomas como tosse seca, cansaço, ardor nos olhos, nariz e garganta. Pessoas de grupos sensíveis(crianças, idosos e pessoas com doenças resiratórias e cardíacas) podem apresentar efeitos mais sérios na saúde\")\n print (\"\\n\")\n \n \n elif (150 < mp10 <= 250) or (75 < mp25 <= 125) or (160 < o3 <= 200) or (13 < co <= 15) or (320 < no2 <= 1130) or (365 < so2 <= 800):\n \n print(\"\\nQualidade do ar: Muito ruim\")\n print (\"\\n\\n\")\n print (\"Toda a população pode aresentar sintomas como tosse seca, cansaço, ardor nos olhos, nariz, e garganta e ainda falta de ar e respiração ofegante. Efeitos ainda mais graves à saúde de grupos sensíveis(crianças, idosos e pessoas com doenças respiratórias e cardíacas)\")\n print (\"\\n\")\n \n \n elif (250 < mp10) or (125 < mp25) or (200 < o3) or (15 < co) or (1130 < no2) or (800 < so2):\n \n print(\"\\nQualidade do ar: Pessima\")\n print (\"\\n\\n\")\n print (\"Toda a população pode apresentar sérios risco de manifestações de doenças respiratórias e cardiovasculares. Aumento de mortes prematuras em pessoas de rupos sensíveis\")\n print (\"\\n\")\n \n break\n\n if opt==0:\n print(\"\\nObrigado por utilizar o sistema! 
Nos vemos em breve!\")\n            break\n    \nif opt==0:\n    flag = False\n    \n    cur.close()\n    conn.close()\n    \n    \n\n\n\n","repo_name":"Time2PI/PI-SistemaQualidadeDoAr","sub_path":"Fase_tresP1_projeto_integrador.py","file_name":"Fase_tresP1_projeto_integrador.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27637918752","text":"#!/usr/bin/python3\n\n\n\"\"\"\nThis module defines a class BaseGeometry with an area() method.\n\"\"\"\n\n\nclass BaseGeometry:\n    def area(self):\n        \"\"\"\n        Calculate the area (not implemented).\n\n        Raises:\n            Exception: This method is not implemented.\n        \"\"\"\n        raise Exception(\"area() is not implemented\")\n\n\nif __name__ == \"__main__\":\n    bg = BaseGeometry()\n\n    try:\n        print(bg.area())\n    except Exception as e:\n        print(\"[{}] {}\".format(e.__class__.__name__, e))\n","repo_name":"brucetravis/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/6-base_geometry.py","file_name":"6-base_geometry.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7681723356","text":"import pandas as p\r\nimport statsmodels.tsa.stattools as sm\r\nimport statsmodels.graphics.tsaplots as s\r\nimport statsmodels.regression as lm\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom numpy.core.multiarray import ndarray\r\nfrom sklearn import datasets, linear_model\r\nfrom statsmodels.tsa.arima_model import ARIMA as a\r\nimport statsmodels.tsa.ar_model as ar\r\nimport statsmodels.tsa.arima_model as ma\r\n\r\ndef mean(list):\r\n    return sum(list) / len(list)\r\n\r\ndef covar(list1, list2):\r\n    mean_1 = mean(list1)\r\n    mean_2 = mean(list2)\r\n    sum_1 = 0\r\n    sum_2 = 0\r\n    for i in list1:\r\n        sum_1 = sum_1 + (i - mean_1)\r\n\r\n    for i in list2:\r\n        sum_2 = sum_2 + (i - mean_2)\r\n\r\n    return 0\r\n\r\ndef calc_acf(data):\r\n    # data segments, separated by lag\r\n    #df = p.DataFrame(index=index, columns=columns)\r\n    data_seg1 = data.loc[:, [\"5Y\"]]\r\n    print(\"Seg 1\")\r\n    print(data_seg1)\r\n    acf = []\r\n    variance = data_seg1.var()\r\n    for i in range(1, 11):\r\n        print(\"Seg 2\")\r\n        data_seg2 = data.loc[i + 1:, [\"5Y\"]]\r\n        print(data_seg2)\r\n        series_1 = p.Series(data_seg1[\"5Y\"])\r\n        series_2 = p.Series(data_seg2[\"5Y\"])\r\n        cov_i = series_1.cov(series_2)\r\n        acf.append(cov_i / variance)\r\n\r\n    # return s.acf(data)\r\n    return acf\r\n\r\ndef calc_pacf(data, lag):\r\n    data_seg_1 = data.loc[:, [\"5Y\"]]\r\n    series = p.Series(data_seg_1[\"5Y\"])\r\n    regr = linear_model.LinearRegression()\r\n\r\n\r\n    # data_seg1 = data.loc[:, [\"5Y\"]]\r\n    data_seg_2 = data[lag:]\r\n    # data_seg2 = data.loc[i:, [\"5Y\"]]\r\n    pacf = sm.pacf(data, nlags=lag, method='ywunbiased')\r\n    #pacf = []\r\n    #for k in range(1, 11):\r\n    #pacf.append(regr.predict(data_seg_1))\r\n    #pacf.append(lm.yule_walker(p.Series.ravel(series))[0][-1])\r\n    return pacf\r\n\r\nsrc = 'bond-returns.csv'\r\nsrc = input(\"Please enter the file: \")\r\ndata = p.read_csv(src, sep = \"|\")\r\n\r\n#print(data) # MUST GIVE COLUMN HEADERS in CSV FILE BEFORE WE DO THIS\r\nfive_year_data = data[[\"5Y\"]]\r\nprint(five_year_data) # how do we get this to work as a 1D array?\r\n#five_year_data_1d = p.Series.ravel(five_year_data)\r\n#print(five_year_data_1d)\r\nacf = calc_acf(five_year_data)\r\nprint(acf[0])\r\nprint(\"ACF is\", acf)\r\n# plt.xlim(1, 10)\r\nacf_plt = s.plot_acf(acf)\r\npacf = calc_pacf(five_year_data, 
10)\r\nprint(pacf)\r\npacf_plt = s.plot_pacf(pacf)\r\nplt.show()\r\n\r\n# Param estimation\r\n#model = a(five_year_data, order=(10,1,0))\r\n# model_fit = model.fit(disp=0)\r\n# print(model_fit.summary())\r\n\r\nar_model = ar.AR(five_year_data)\r\nar_model_fit = ar_model.fit(10)\r\nprint(\"Params\")\r\nprint(ar_model_fit.params)\r\n\r\nma_model = ma.ARMA(five_year_data.values, (0, 10))\r\nma_model_fit = ma_model.fit()\r\nprint(\"MA Params\")\r\nprint(ma_model_fit.params)","repo_name":"dsli208/Computational-Finance","sub_path":"HW4/cse390_hw4.py","file_name":"cse390_hw4.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30865310741","text":"from random import sample\n\n\n# -------------------------------------------------------------------------------------------------------------- #\n# The main function holds the application logic. First the user enters a name so the program can remember it and\n# later write it to a file. The variable words is assigned the result of read_words('words.txt'), which reads the\n# words from the file and returns them as a list. A counter is set up to keep score.\n# A for loop then iterates over words; since the function result is already accumulated in that variable, the loop\n# walks the list, and sample_word, sitting in the loop body and calling word_sample(word), picks up each word,\n# shuffles its letters and returns the value. The user is then asked to guess the shuffled word, with the answer\n# lowercased just in case. A correct answer earns 10 points; otherwise the correct word is printed.\n# Finally record_top_players is called with the history file, the user name and the score counter, and\n# reading_top_players is called with the history file that by then holds all the data, printed via print().\n\n\ndef main():\n\n    \"\"\"\n    Main function with the game logic.\n    :return: The result of the whole program: the player is recorded in the top list and the game result is printed.\n    \"\"\"\n    user_name = input('Enter your name: ')\n    while user_name != 'stop':\n        words = read_words('words.txt')  # <-------- line 52\n        points_counter = 0\n\n        for word in words:\n            sample_word = word_sample(word)  # <-------- line 75\n            user_input = input(f'Guess the word: {sample_word} - ').lower()\n\n            if user_input == word:\n                points_counter += 10\n                print(f'Correct, you get 10 points.', end='\\n' * 2)\n            else:\n                print(f'Wrong, the right word is - {word}', end='\\n' * 2)\n\n        record_top_players(history='history.txt', user=user_name, score=points_counter)  # --------> line 93\n        print(reading_top_players(history='history.txt'), \"\\n\")  # --------> line 116\n\n        user_name = input('Do you want to play again? If yes, enter your name, if no, enter \"stop\"\\n: ')\n        if user_name == 'stop':\n            print('See you!')\n\n# -------------------------------------------------------------------------------------------------------------- #\n# The read_words function takes the argument (file_words), a txt file from which the words are read.\n# The words are accumulated in a list comprehension, and
 return hands back a generated list of the words from the\n# file, ready for use.\n\n\ndef read_words(file_words: str):  # --------> line 26\n\n    \"\"\"\n    Reads the words from words.txt\n    :param file_words: words.txt\n    :return: A generated list of the words from the file.\n    \"\"\"\n    with open(file_words, encoding='utf-8') as text:\n        result = [i.strip() for i in text.readlines()]\n        return result\n\n\n# -------------------------------------------------------------------------------------------------------------- #\n# The word_sample function takes the argument word, the loop variable of the for loop in main(), which in turn\n# iterates over the variable words, the list already read by read_words from words.txt. By placing sample_word in\n# the body of main's for loop, every new iteration passes the next word into this function for shuffling.\n# The shuffling is done with the sample function, which returns a list of n random elements from the chosen\n# sequence (a string/list/tuple); here it receives the word the loop runs over and the word's length.\n# Since sample returns a \"list\" of characters, the shuffled letters are glued back into a word with join\n# and returned to main.\n\n\ndef word_sample(word: str):  # --------> line 30\n\n    \"\"\"\n    Shuffles the letters in a word.\n    :param word: the loop variable that iterates over the word list.\n    :return: the shuffled word.\n    \"\"\"\n    result = sample(word, len(word))\n    return \"\".join(result)\n\n\n# -------------------------------------------------------------------------------------------------------------- #\n# The record_top_players function takes 1) the history.txt file, 2) the user name, 3) the number of points the\n# user scored in the game and 4) a separator.\n# The user_result variable then uses an f-string to combine these arguments, which by this point have already\n# been collected and processed by the previous functions and by main.\n\n\ndef record_top_players(history, user, score, sep=': '):  # <-------- line 39\n\n    \"\"\"\n    Records the players in the top list.\n    :param history: history.txt\n    :param user: the user name\n    :param score: the number of points scored\n    :param sep: a separator for readability\n    :return: returns nothing; it only receives ready-to-write parameters from main()\n    \"\"\"\n    user_result = f'{user}{sep}{score}\\n'\n    with open(history, 'a', encoding='utf-8') as text:\n        text.write(user_result)\n\n\n# -------------------------------------------------------------------------------------------------------------- #\n# The reading_top_players function has a single argument, the history.txt file, used to read the results of all\n# games. It has two variables: 1) count_games, which counts the number of games, and 2) count_scores, a list that\n# collects all the data found after \": \". After counting, both variables are sent to return to report\n# the game results.
 The maximum score is printed by applying the\n# max() function to the list.\n\n\ndef reading_top_players(history: str):  # <-------- line 40\n\n    \"\"\"\n    Reads the game results from the file.\n    :param history: history.txt with the stored parameters.\n    :return: the result with the number of games and the highest score.\n    \"\"\"\n    with open(history, encoding='utf-8') as text:\n        count_games, count_scores = 0, []\n\n        for i in text:\n            name, score = i.strip().split(': ')\n            count_games += 1\n            count_scores.append(int(score))\n\n        return f'Total games played: {count_games}, the maximum record is: {max(count_scores)}'\n\n\n# -------------------------------------------------------------------------------------------------------------- #\n# This pattern is the entry point that starts the program.\n\n\nif __name__ == '__main__':\n    main()\n##################################################################################################################\n","repo_name":"Stereo2025/SkP_2_course_6_HW","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11783255054","text":"from keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.models import load_model\nimport pandas as pd\nimport os\nimport numpy as np\n\n# dimensions of our images.\nmodel_path = './Transfer_Learning/Models/model.tf.hdf5'\nimg_width, img_height = 128, 128\nnb_train_samples = 210\nnb_validation_samples = 19\nepochs = 20\nbatch_size = 5\nLoad_model = True\n\n# build the VGG16 network\nmodel = applications.VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))\nprint('Model loaded.')\n\n# Adding custom Layers\nx = model.output\nx = Flatten()(x)\nx = Dense(128, activation=\"relu\")(x)\nx = Dropout(0.5)(x)\nx = Dense(128, activation=\"relu\")(x)\npredictions = Dense(10, activation=\"softmax\")(x)\n\n# creating the final model\nmodel_final = Model(input = model.input, output = predictions)\n\n# compile the model\nmodel_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr=1e-4), metrics=[\"accuracy\"])\n\n# Build neural network for training\nif Load_model and os.path.exists(model_path):\n    print('==> loading pre-trained model')\n    model_final = load_model(model_path)\n    model_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr=1e-4), metrics=[\"accuracy\"])\n\n# Train New Model\ntrain = pd.read_csv(\"./Transfer_Learning/Data/Train/flower_labels.csv\")\ntest = pd.read_csv(\"./Transfer_Learning/Data/Test/flower_labels.csv\")\ntrain_path = \"./Transfer_Learning/Data/Train/\"\ntest_path = \"./Transfer_Learning/Data/Test/\"\n\n# Convert Images from path to x_train, y_train, x_test, y_test\nx_train = []\nfor i in range(len(train)):\n    temp_img=image.load_img(train_path + train['file'][i],target_size=(img_width,img_height))\n    temp_img=image.img_to_array(temp_img)\n    x_train.append(temp_img)\n\n#converting train images to array and applying mean subtraction processing\nx_train=np.array(x_train)\nx_train=preprocess_input(x_train)\n\nx_test = []\nfor i in range(len(test)):\n    
+{"seq_id":"11783255054","text":"from keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.models import load_model\nimport pandas as pd\nimport os\nimport numpy as np\n\n# model path and dimensions of our images\nmodel_path = './Transfer_Learning/Models/model.tf.hdf5'\nimg_width, img_height = 128, 128\nnb_train_samples = 210\nnb_validation_samples = 19\nepochs = 20\nbatch_size = 5\nLoad_model = True\n\n# build the VGG16 network\nmodel = applications.VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))\nprint('Model loaded.')\n\n# Adding custom Layers\nx = model.output\nx = Flatten()(x)\nx = Dense(128, activation=\"relu\")(x)\nx = Dropout(0.5)(x)\nx = Dense(128, activation=\"relu\")(x)\npredictions = Dense(10, activation=\"softmax\")(x)\n\n# creating the final model (Keras expects the plural keyword arguments `inputs`/`outputs`)\nmodel_final = Model(inputs=model.input, outputs=predictions)\n\n# compile the model\nmodel_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr=1e-4), metrics=[\"accuracy\"])\n\n# Build neural network for training\nif Load_model and os.path.exists(model_path):\n    print('==> loading pre-trained model')\n    model_final = load_model(model_path)\n    model_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr=1e-4), metrics=[\"accuracy\"])\n\n# Train New Model\ntrain = pd.read_csv(\"./Transfer_Learning/Data/Train/flower_labels.csv\")\ntest = pd.read_csv(\"./Transfer_Learning/Data/Test/flower_labels.csv\")\ntrain_path = \"./Transfer_Learning/Data/Train/\"\ntest_path = \"./Transfer_Learning/Data/Test/\"\n\n# Convert Images from path to x_train, y_train, x_test, y_test\nx_train = []\nfor i in range(len(train)):\n    temp_img=image.load_img(train_path + train['file'][i],target_size=(img_width,img_height))\n    temp_img=image.img_to_array(temp_img)\n    x_train.append(temp_img)\n\n#converting train images to array and applying mean subtraction processing\nx_train=np.array(x_train)\nx_train=preprocess_input(x_train)\n\nx_test = []\nfor i in range(len(test)):\n    temp_img=image.load_img(test_path + test['file'][i],target_size=(img_width,img_height))\n    temp_img=image.img_to_array(temp_img)\n    x_test.append(temp_img)\n\n#converting test images to array and applying mean subtraction processing\nx_test = np.array(x_test)\nx_test = preprocess_input(x_test)\n\n# one-hot encoding for the target variable - for train\ntrain_y = np.asarray(train['label'])\ntrain_y = pd.get_dummies(train_y)\ntrain_y = np.array(train_y)\n\n# one-hot encoding for the target variable - for test\ntest_y = np.asarray(test['label'])\ntest_y = pd.get_dummies(test_y)\ntest_y = np.array(test_y)\n\ncheckpointer = ModelCheckpoint(filepath='./Transfer_Learning/Models/model.tf.hdf5', verbose=1, save_best_only=True)\nmodel_final.fit(x_train, train_y, batch_size=batch_size, epochs=epochs,\n                validation_split=0.2, callbacks=[checkpointer],\n                verbose=1, shuffle=True)","repo_name":"sharon12312/cifar-10","sub_path":"Transfer_Learning/transfer_net.py","file_name":"transfer_net.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
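A standalone sketch of using the checkpoint the script above saves, with the same era of Keras APIs; 'some_flower.jpg' is a hypothetical input file, and the preprocessing mirrors the training pipeline:

import numpy as np
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input

model = load_model('./Transfer_Learning/Models/model.tf.hdf5')

# preprocess one image exactly as during training: resize, to-array, mean subtraction
img = image.load_img('some_flower.jpg', target_size=(128, 128))   # hypothetical file
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

probs = model.predict(x)   # shape (1, 10): softmax scores over the 10 classes
print('predicted class:', int(np.argmax(probs, axis=1)[0]))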
+{"seq_id":"30697043246","text":"import numpy as np\r\n\r\nname_list = ['小明','小華','小菁','小美','小張','John','Mark','Tom']\r\nsex_list = ['boy','boy','girl','girl','boy','boy','boy','boy']\r\nweight_list = [67.5,75.3,50.1,45.5,80.8,90.4,78.4,70.7]\r\nrank_list = [8,1,5,4,7,6,2,3]\r\nmyopia_list = [True,True,False,False,True,True,False,False]\r\n\r\n#1. Put the lists above into an array in the order ['name', 'sex', 'weight', 'rank', 'myopia'], with the data types in the order [Unicode,Unicode,float,int,boolean]\r\nstudent_type = {'names':('name', 'sex', 'weight', 'rank', 'myopia'), 'formats':('U10','U6','f8','i4','?')}\r\nstudents = np.zeros(8,dtype=student_type)\r\nstudents['name'] = name_list\r\nstudents['sex'] = sex_list\r\nstudents['weight'] = weight_list\r\nstudents['rank'] = rank_list\r\nstudents['myopia'] = myopia_list\r\n# print(students)\r\n# [('小明', 'boy', 67.5, 8, True) ('小華', 'boy', 75.3, 1, True)\r\n#  ('小菁', 'girl', 50.1, 5, False) ('小美', 'girl', 45.5, 4, False)\r\n#  ('小張', 'boy', 80.8, 7, True) ('John', 'boy', 90.4, 6, True)\r\n#  ('Mark', 'boy', 78.4, 2, False) ('Tom', 'boy', 70.7, 3, False)]\r\n\r\n#2. Building on the above, take the weight column out of the array and compute the overall average weight\r\nmean_weight = np.mean(students['weight'])\r\n# print(mean_weight) \r\n# 69.8375\r\n\r\n#3. Building on the above, further compute the boys' (sex column is 'boy') average weight\r\nboy_index = np.where(students['sex']=='boy')[0] #(array([0, 1, 4, 5, 6, 7], dtype=int64),) take element [0] of the tuple\r\nboy_weight_mean = np.mean(students['weight'][boy_index])\r\n# print(boy_weight_mean)\r\n# 77.18333333333332\r\n\r\n#4. Building on the above, further compute the girls' (sex column is 'girl') average weight\r\ngirl_index = np.where(students['sex']=='girl')[0] #(array([2, 3], dtype=int64),) take element [0] of the tuple\r\ngirl_weight_mean = np.mean(students['weight'][girl_index])\r\n# print(girl_weight_mean)\r\n# 47.8\r\n","repo_name":"BrewTC/Python60Days","sub_path":"Day8.py","file_name":"Day8.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
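The np.where step in the record above is optional: a boolean mask indexes a structured array directly. A self-contained sketch of the same pattern with made-up data:

import numpy as np

# named fields with per-field dtypes, as in the exercise above
people = np.zeros(3, dtype={'names': ('name', 'sex', 'weight'),
                            'formats': ('U10', 'U6', 'f8')})
people['name'] = ['A', 'B', 'C']
people['sex'] = ['boy', 'girl', 'boy']
people['weight'] = [60.0, 50.0, 70.0]

# boolean masks select rows without an explicit index array
print(np.mean(people['weight'][people['sex'] == 'boy']))   # 65.0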
+{"seq_id":"33193344338","text":"# -*- coding: utf-8 -*-\n# -----------------------------Module imports-----------------------------------\n\nfrom PyQt5 import QtSql\nimport os\nimport os.path\nimport shutil\n\nfrom PyQt5.QtWidgets import QWidget, QLabel, QVBoxLayout, QFormLayout, QTableWidget, QComboBox, \\\n    QSpinBox, QPushButton, QListWidgetItem\n\n# -----------------------------------Form--------------------------------------\n\nclass fvSchemes_form_class(QWidget):\n    def __init__(self, parent=None):\n        QWidget.__init__(self, parent)\n\n        self.interface_lng_val = parent.interface_lng_val\n        self.con = parent.con\n        self.full_dir = parent.full_dir\n        self.par = parent\n\n        if self.con.open():\n\n            table = QTableWidget(12, 2)\n            table.setColumnWidth(0, 250)\n            table.setColumnWidth(1, 230)\n            table.setFixedSize(674, 480)\n            table.setHorizontalHeaderLabels([\"Параметр\", \"Значение\"])\n\n            # d2dt2Schemes.default\n            d2dt2Schemes_default_lbl = QLabel('d2dt2Schemes.default')\n            self.d2dt2Schemes_default = QComboBox()\n            d2dt2Schemes_default_list = [\"steadyState\", \"demo\"]\n            self.d2dt2Schemes_default.addItems(d2dt2Schemes_default_list)\n            table.setCellWidget(0, 1, self.d2dt2Schemes_default)\n            table.setCellWidget(0, 0, d2dt2Schemes_default_lbl)\n\n            # ddtSchemes.default\n            ddtSchemes_default_lbl = QLabel('ddtSchemes.default')\n            self.ddtSchemes_default = QComboBox()\n            ddtSchemes_default_list = [\"Euler\", \"demo\"]\n            self.ddtSchemes_default.addItems(ddtSchemes_default_list)\n            table.setCellWidget(1, 1, self.ddtSchemes_default)\n            table.setCellWidget(1, 0, ddtSchemes_default_lbl)\n\n            # gradSchemes.default\n            gradSchemes_default_lbl = QLabel('gradSchemes.default')\n            self.gradSchemes_default = QComboBox()\n            gradSchemes_default_list = [\"leastSquares\", \"demo\"]\n            self.gradSchemes_default.addItems(gradSchemes_default_list)\n            table.setCellWidget(2, 1, self.gradSchemes_default)\n            table.setCellWidget(2, 0, gradSchemes_default_lbl)\n\n            # gradSchemes.grad(D)\n            gradSchemes_grad_D_lbl = QLabel('gradSchemes.grad(D)')\n            self.gradSchemes_grad_D = QComboBox()\n            gradSchemes_grad_D_list = [\"leastSquares\", \"demo\"]\n            self.gradSchemes_grad_D.addItems(gradSchemes_grad_D_list)\n            table.setCellWidget(3, 1, self.gradSchemes_grad_D)\n            table.setCellWidget(3, 0, gradSchemes_grad_D_lbl)\n\n            # gradSchemes.grad(T)\n            gradSchemes_grad_T_lbl = QLabel('gradSchemes.grad(T)')\n            self.gradSchemes_grad_T = QComboBox()\n            gradSchemes_grad_T_list = [\"leastSquares\", \"demo\"]\n            self.gradSchemes_grad_T.addItems(gradSchemes_grad_T_list)\n            table.setCellWidget(4, 1, self.gradSchemes_grad_T)\n            table.setCellWidget(4, 0, gradSchemes_grad_T_lbl)\n\n            # divSchemes.default\n            divSchemes_default_lbl = QLabel('divSchemes.default')\n            self.divSchemes_default = QComboBox()\n            divSchemes_default_list = [\"none\", \"demo\"]\n            self.divSchemes_default.addItems(divSchemes_default_list)\n            table.setCellWidget(5, 1, self.divSchemes_default)\n            table.setCellWidget(5, 0, divSchemes_default_lbl)\n\n            # divSchemes.div(sigmaD)\n            divSchemes_div_sigmaD_lbl = QLabel('divSchemes.div(sigmaD)')\n            self.divSchemes_div_sigmaD = QComboBox()\n            divSchemes_div_sigmaD_list = [\"Gauss linear\", \"demo\"]\n            self.divSchemes_div_sigmaD.addItems(divSchemes_div_sigmaD_list)\n            table.setCellWidget(6, 1, self.divSchemes_div_sigmaD)\n            table.setCellWidget(6, 0, divSchemes_div_sigmaD_lbl)\n\n            # laplacianSchemes.default\n            laplacianSchemes_default_lbl = QLabel('laplacianSchemes.default')\n            self.laplacianSchemes_default = QComboBox()\n            laplacianSchemes_default_list = [\"none\", \"demo\"]\n            self.laplacianSchemes_default.addItems(laplacianSchemes_default_list)\n            table.setCellWidget(7, 1, self.laplacianSchemes_default)\n            table.setCellWidget(7, 0, laplacianSchemes_default_lbl)\n\n            # laplacianSchemes.laplacian(DD,D)\n            laplacianSchemes_laplacian_DD_D_lbl = QLabel('laplacianSchemes.laplacian(DD,D)')\n            self.laplacianSchemes_laplacian_DD_D = QComboBox()\n            laplacianSchemes_laplacian_DD_D_list = [\"Gauss linear corrected\", \"demo\"]\n            self.laplacianSchemes_laplacian_DD_D.addItems(laplacianSchemes_laplacian_DD_D_list)\n            table.setCellWidget(8, 1, self.laplacianSchemes_laplacian_DD_D)\n            table.setCellWidget(8, 0, laplacianSchemes_laplacian_DD_D_lbl)\n\n            # laplacianSchemes.laplacian(DT,T)\n            laplacianSchemes_laplacian_DT_T_lbl = QLabel('laplacianSchemes.laplacian(DT,T)')\n            self.laplacianSchemes_laplacian_DT_T = QComboBox()\n            laplacianSchemes_laplacian_DT_T_list = [\"Gauss linear corrected\", \"demo\"]\n            self.laplacianSchemes_laplacian_DT_T.addItems(laplacianSchemes_laplacian_DT_T_list)\n            table.setCellWidget(9, 1, self.laplacianSchemes_laplacian_DT_T)\n            table.setCellWidget(9, 0, laplacianSchemes_laplacian_DT_T_lbl)\n\n            # interpolationSchemes.default\n            interpolationSchemes_default_lbl = QLabel('interpolationSchemes.default')\n            self.interpolationSchemes_default = QComboBox()\n            interpolationSchemes_default_list = [\"linear\", \"demo\"]\n            self.interpolationSchemes_default.addItems(interpolationSchemes_default_list)\n            table.setCellWidget(10, 1, self.interpolationSchemes_default)\n            table.setCellWidget(10, 0, interpolationSchemes_default_lbl)\n\n            # snGradSchemes.default\n            snGradSchemes_default_lbl = QLabel('snGradSchemes.default')\n            self.snGradSchemes_default = QComboBox()\n            snGradSchemes_default_list = [\"demo\", \"none\"]\n            self.snGradSchemes_default.addItems(snGradSchemes_default_list)\n            table.setCellWidget(11, 1, self.snGradSchemes_default)\n            table.setCellWidget(11, 0, snGradSchemes_default_lbl)\n\n            # restore the saved parameter values\n            if 'fvSchemes' in self.con.tables():\n\n                query = QtSql.QSqlQuery()\n                query.exec(\"SELECT * FROM fvSchemes\")\n                if query.isActive():\n                    query.first()\n                    value_list = []\n                    while query.isValid():\n                        value_res = query.value('value')\n                        value_list.append(value_res)\n                        query.next()\n\n                    # d2dt2Schemes_default\n                    d2dt2Schemes_default_mas = self.d2dt2Schemes_default.count()\n                    for i in range(d2dt2Schemes_default_mas):\n                        if self.d2dt2Schemes_default.itemText(i) == value_list[0]:\n                            self.d2dt2Schemes_default.setCurrentIndex(i)\n\n                    # ddtSchemes_default\n                    ddtSchemes_default_mas = self.ddtSchemes_default.count()\n                    for i in range(ddtSchemes_default_mas):\n                        if self.ddtSchemes_default.itemText(i) == value_list[1]:\n                            self.ddtSchemes_default.setCurrentIndex(i)\n\n                    # gradSchemes_default\n                    gradSchemes_default_mas = self.gradSchemes_default.count()\n                    for i in range(gradSchemes_default_mas):\n                        if self.gradSchemes_default.itemText(i) == value_list[2]:\n                            self.gradSchemes_default.setCurrentIndex(i)\n\n                    # gradSchemes_grad_D\n                    gradSchemes_grad_D_mas = self.gradSchemes_grad_D.count()\n                    for i in range(gradSchemes_grad_D_mas):\n                        if self.gradSchemes_grad_D.itemText(i) == value_list[3]:\n                            self.gradSchemes_grad_D.setCurrentIndex(i)\n\n                    # gradSchemes_grad_T\n                    gradSchemes_grad_T_mas = self.gradSchemes_grad_T.count()\n                    for i in range(gradSchemes_grad_T_mas):\n                        if self.gradSchemes_grad_T.itemText(i) == value_list[4]:\n                            self.gradSchemes_grad_T.setCurrentIndex(i)\n\n                    # divSchemes_default\n                    divSchemes_default_mas = self.divSchemes_default.count()\n                    for i in range(divSchemes_default_mas):\n                        if self.divSchemes_default.itemText(i) == value_list[5]:\n                            self.divSchemes_default.setCurrentIndex(i)\n\n                    # divSchemes_div_sigmaD\n                    divSchemes_div_sigmaD_mas = self.divSchemes_div_sigmaD.count()\n                    for i in range(divSchemes_div_sigmaD_mas):\n                        if self.divSchemes_div_sigmaD.itemText(i) == value_list[6]:\n                            self.divSchemes_div_sigmaD.setCurrentIndex(i)\n\n                    # laplacianSchemes_default\n                    laplacianSchemes_default_mas = self.laplacianSchemes_default.count()\n                    for i in range(laplacianSchemes_default_mas):\n                        if self.laplacianSchemes_default.itemText(i) == value_list[7]:\n                            self.laplacianSchemes_default.setCurrentIndex(i)\n\n                    # laplacianSchemes_laplacian_DD_D\n                    laplacianSchemes_laplacian_DD_D_mas = self.laplacianSchemes_laplacian_DD_D.count()\n                    for i in range(laplacianSchemes_laplacian_DD_D_mas):\n                        if self.laplacianSchemes_laplacian_DD_D.itemText(i) == value_list[8]:\n                            self.laplacianSchemes_laplacian_DD_D.setCurrentIndex(i)\n\n                    # laplacianSchemes_laplacian_DT_T\n                    laplacianSchemes_laplacian_DT_T_mas = self.laplacianSchemes_laplacian_DT_T.count()\n                    for i in range(laplacianSchemes_laplacian_DT_T_mas):\n                        if self.laplacianSchemes_laplacian_DT_T.itemText(i) == value_list[9]:\n                            self.laplacianSchemes_laplacian_DT_T.setCurrentIndex(i)\n\n                    # interpolationSchemes_default\n                    interpolationSchemes_default_mas = self.interpolationSchemes_default.count()\n                    for i in range(interpolationSchemes_default_mas):\n                        if self.interpolationSchemes_default.itemText(i) == value_list[10]:\n                            self.interpolationSchemes_default.setCurrentIndex(i)\n\n                    # snGradSchemes_default\n                    snGradSchemes_default_mas = self.snGradSchemes_default.count()\n                    for i in range(snGradSchemes_default_mas):\n                        if self.snGradSchemes_default.itemText(i) == value_list[11]:\n                            self.snGradSchemes_default.setCurrentIndex(i)\n\n\n            btnSave = QPushButton()\n            btnSave.setFixedSize(80, 25)\n            btnSave.clicked.connect(self.on_btnSave_clicked)\n\n            if self.interface_lng_val == 'Russian':\n                btnSave.setText(\"Сохранить\")\n            elif self.interface_lng_val == 'English':\n                btnSave.setText(\"Save\")\n\n            vbox = QVBoxLayout()\n            vbox.addWidget(table)\n            vbox.addWidget(btnSave)\n\n# ---------------------Placing all components on the form-------------------------\n\n            form = QFormLayout()\n            form.addRow(vbox)\n            self.setLayout(form)
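The twelve restore loops above all do the same linear scan over a combo box; a small helper would collapse them. A hypothetical sketch (the helper name is invented; QComboBox.findText is the standard PyQt5 API and returns -1 when the text is absent):

def set_combo_by_text(combo, text):
    """Select the item whose text matches `text`; leave the combo unchanged otherwise."""
    index = combo.findText(text)
    if index >= 0:
        combo.setCurrentIndex(index)

# usage, replacing one of the hand-written loops:
# set_combo_by_text(self.d2dt2Schemes_default, value_list[0])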
\n    def on_btnSave_clicked(self):\n        d2dt2Schemes_default_txt = self.d2dt2Schemes_default.currentText()\n        ddtSchemes_default_txt = self.ddtSchemes_default.currentText()\n        gradSchemes_default_txt = self.gradSchemes_default.currentText()\n        gradSchemes_grad_D_txt = self.gradSchemes_grad_D.currentText()\n        gradSchemes_grad_T_txt = self.gradSchemes_grad_T.currentText()\n        divSchemes_default_txt = self.divSchemes_default.currentText()\n        divSchemes_div_sigmaD_txt = self.divSchemes_div_sigmaD.currentText()\n        laplacianSchemes_default_txt = self.laplacianSchemes_default.currentText()\n        laplacianSchemes_laplacian_DD_D_txt = self.laplacianSchemes_laplacian_DD_D.currentText()\n        laplacianSchemes_laplacian_DT_T_txt = self.laplacianSchemes_laplacian_DT_T.currentText()\n        interpolationSchemes_default_txt = self.interpolationSchemes_default.currentText()\n        snGradSchemes_default_txt = self.snGradSchemes_default.currentText()\n\n        if 'fvSchemes' not in self.con.tables():\n            query = QtSql.QSqlQuery()\n            query.exec(\"CREATE TABLE fvSchemes(param, value)\")\n\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('d2dt2Schemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('ddtSchemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('gradSchemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('gradSchemes.grad(D)', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('gradSchemes.grad(T)', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('divSchemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('divSchemes.div(sigmaD)', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('laplacianSchemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('laplacianSchemes.laplacian(DD,D)', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('laplacianSchemes.laplacian(DT,T)', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('interpolationSchemes.default', ''))\n            query.exec(\"INSERT INTO fvSchemes(param, value) VALUES ('%s','%s')\" % ('snGradSchemes.default', ''))\n\n\n        if 'fvSchemes' in self.con.tables():\n            query = QtSql.QSqlQuery()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='d2dt2Schemes.default'\")\n            query.bindValue(0, d2dt2Schemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='ddtSchemes.default'\")\n            query.bindValue(0, ddtSchemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='gradSchemes.default'\")\n            query.bindValue(0, gradSchemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='gradSchemes.grad(D)'\")\n            query.bindValue(0, gradSchemes_grad_D_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='gradSchemes.grad(T)'\")\n            query.bindValue(0, gradSchemes_grad_T_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='divSchemes.default'\")\n            query.bindValue(0, divSchemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='divSchemes.div(sigmaD)'\")\n            query.bindValue(0, divSchemes_div_sigmaD_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='laplacianSchemes.default'\")\n            query.bindValue(0, laplacianSchemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='laplacianSchemes.laplacian(DD,D)'\")\n            query.bindValue(0, laplacianSchemes_laplacian_DD_D_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='laplacianSchemes.laplacian(DT,T)'\")\n            query.bindValue(0, laplacianSchemes_laplacian_DT_T_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='interpolationSchemes.default'\")\n            query.bindValue(0, interpolationSchemes_default_txt)\n            query.exec_()\n\n            query.prepare(\"UPDATE fvSchemes SET value=? WHERE param='snGradSchemes.default'\")\n            query.bindValue(0, snGradSchemes_default_txt)\n            query.exec_()
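The repeated prepare/bind/exec blocks above could be table-driven. A sketch continuing the same method, assuming the same open QtSql connection; the `values` mapping shows only two entries, one per table row in the full version:

values = {
    'd2dt2Schemes.default': d2dt2Schemes_default_txt,
    'ddtSchemes.default': ddtSchemes_default_txt,
    # ...one entry per parameter row, as in the blocks above...
}
query = QtSql.QSqlQuery()
for param, value in values.items():
    # bind both the value and the parameter name instead of twelve literal statements
    query.prepare("UPDATE fvSchemes SET value=? WHERE param=?")
    query.bindValue(0, value)
    query.bindValue(1, param)
    query.exec_()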
\n        # write the fvSchemes file\n        if os.path.exists(self.full_dir + '/system/fvSchemes'):\n            os.remove(self.full_dir + '/system/fvSchemes')\n\n        shutil.copyfile(\"./matches/Shablon/system/fvSchemes\", self.full_dir + '/system/fvSchemes')\n\n        fvS = open(self.full_dir + '/system/fvSchemes', 'a')\n        ###d2dt2Schemes###\n        d2dt2Schemes_bl = '\\n' + 'd2dt2Schemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + d2dt2Schemes_default_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###ddtSchemes###\n        ddtSchemes_bl = 'ddtSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + ddtSchemes_default_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###gradSchemes###\n        gradSchemes_bl = 'gradSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + gradSchemes_default_txt + ';' + '\\n' \\\n                        + '    ' + 'grad(D)' + '    ' + gradSchemes_grad_D_txt + ';' + '\\n' \\\n                        + '    ' + 'grad(T)' + '    ' + gradSchemes_grad_T_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###divSchemes###\n        divSchemes_bl = 'divSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + divSchemes_default_txt + ';' + '\\n' \\\n                        + '    ' + 'div(sigmaD)' + '    ' + divSchemes_div_sigmaD_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###laplacianSchemes###\n        laplacianSchemes_bl = 'laplacianSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + laplacianSchemes_default_txt + ';' + '\\n' \\\n                        + '    ' + 'laplacian(DD,D)' + '    ' + laplacianSchemes_laplacian_DD_D_txt + ';' + '\\n' \\\n                        + '    ' + 'laplacian(DT,T)' + '    ' + laplacianSchemes_laplacian_DT_T_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###interpolationSchemes###\n        interpolationSchemes_bl = 'interpolationSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + interpolationSchemes_default_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        ###snGradSchemes###\n        snGradSchemes_bl = 'snGradSchemes' + '\\n' + '{' + '\\n' + '    ' + 'default' + '    ' + snGradSchemes_default_txt + ';' + '\\n' + '}' + '\\n\\n'\n\n        fvS.write(d2dt2Schemes_bl + ddtSchemes_bl + gradSchemes_bl + divSchemes_bl + laplacianSchemes_bl + interpolationSchemes_bl + snGradSchemes_bl)\n        close_str = '// ************************************************************************* //'\n        fvS.write(close_str)\n\n        fvS.close()\n\n        self.par.cdw.setWidget(self.par.outf_scroll)\n        outf = open(self.full_dir + '/system/fvSchemes')\n\n        if self.interface_lng_val == 'Russian':\n            msg_lbl = QLabel(\n                'Файл fvSchemes сохранен')\n        elif self.interface_lng_val == 'English':\n            msg_lbl = QLabel(\n                'The fvSchemes file was saved')\n\n        self.par.listWidget.clear()\n        self.par.item = QListWidgetItem()\n        self.par.listWidget.addItem(self.par.item)\n        self.par.listWidget.setItemWidget(self.par.item, msg_lbl)\n\n        data = outf.read()\n\n        if self.interface_lng_val == 'Russian':\n            self.par.outf_lbl.setText(\"Файл \" + \"\" + 'fvSchemes' + \"\")\n        elif self.interface_lng_val == 'English':\n            self.par.outf_lbl.setText(\"\" + 'fvSchemes' + \"\" + \" file\")\n        self.par.outf_edit.setText(data)\n\n        self.par.cdw.setTitleBarWidget(self.par.cdw_frame)\n        outf.close()\n\n","repo_name":"DmitryChitalov/stress_analysis_app","sub_path":"forms/fvSchemes_form.py","file_name":"fvSchemes_form.py","file_ext":"py","file_size_in_byte":20190,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
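The long string concatenations in on_btnSave_clicked above could be expressed as one small formatter for OpenFOAM dictionary blocks. A standalone sketch (the function name is invented):

def scheme_block(name, entries):
    """Render one OpenFOAM dictionary block from a {keyword: scheme} mapping."""
    lines = [name, '{'] + [f'    {k}    {v};' for k, v in entries.items()] + ['}', '']
    return '\n'.join(lines)

print(scheme_block('gradSchemes', {'default': 'leastSquares', 'grad(D)': 'leastSquares'}))
# gradSchemes
# {
#     default    leastSquares;
#     grad(D)    leastSquares;
# }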
+{"seq_id":"27976428440","text":"import fixtures\nimport json\nfrom lxml import etree\nfrom oslo.config import cfg\nimport requests\nimport testtools\nfrom testtools import matchers\nimport urlparse\n\nfrom os_collect_config import 
cfn\nfrom os_collect_config import collect\nfrom os_collect_config import exc\n\n\nMETA_DATA = {u'int1': 1,\n u'strfoo': u'foo',\n u'map_ab': {\n u'a': 'apple',\n u'b': 'banana',\n }}\n\n\nclass FakeResponse(dict):\n def __init__(self, text):\n self.text = text\n\n def raise_for_status(self):\n pass\n\n\nclass FakeRequests(object):\n exceptions = requests.exceptions\n\n def __init__(self, testcase):\n self._test = testcase\n\n def Session(self):\n class FakeReqSession(object):\n def __init__(self, testcase):\n self._test = testcase\n\n def get(self, url, params, headers):\n url = urlparse.urlparse(url)\n self._test.assertEquals('/', url.path)\n self._test.assertEquals('application/json',\n headers['Content-Type'])\n self._test.assertIn('SignatureVersion', params)\n self._test.assertEquals('2', params['SignatureVersion'])\n self._test.assertIn('Signature', params)\n self._test.assertIn('Action', params)\n self._test.assertEquals('DescribeStackResource',\n params['Action'])\n self._test.assertIn('LogicalResourceId', params)\n self._test.assertEquals('foo', params['LogicalResourceId'])\n root = etree.Element('DescribeStackResourceResponse')\n result = etree.SubElement(root, 'DescribeStackResourceResult')\n detail = etree.SubElement(result, 'StackResourceDetail')\n metadata = etree.SubElement(detail, 'Metadata')\n metadata.text = json.dumps(META_DATA)\n return FakeResponse(etree.tostring(root))\n return FakeReqSession(self._test)\n\n\nclass FakeFailRequests(object):\n exceptions = requests.exceptions\n\n class Session(object):\n def get(self, url, params, headers):\n raise requests.exceptions.HTTPError(403, 'Forbidden')\n\n\nclass TestCfn(testtools.TestCase):\n def setUp(self):\n super(TestCfn, self).setUp()\n self.log = self.useFixture(fixtures.FakeLogger())\n collect.setup_conf()\n cfg.CONF.cfn.metadata_url = 'http://127.0.0.1:8000/'\n cfg.CONF.cfn.path = ['foo.Metadata']\n cfg.CONF.cfn.access_key_id = '0123456789ABCDEF'\n cfg.CONF.cfn.secret_access_key = 'FEDCBA9876543210'\n\n def test_collect_cfn(self):\n cfn_md = cfn.Collector(requests_impl=FakeRequests(self)).collect()\n self.assertThat(cfn_md, matchers.IsInstance(dict))\n\n for k in ('int1', 'strfoo', 'map_ab'):\n self.assertIn(k, cfn_md)\n self.assertEquals(cfn_md[k], META_DATA[k])\n\n self.assertEquals('', self.log.output)\n\n def test_collect_cfn_fail(self):\n cfn_collect = cfn.Collector(requests_impl=FakeFailRequests)\n self.assertRaises(exc.CfnMetadataNotAvailable, cfn_collect.collect)\n self.assertIn('Forbidden', self.log.output)\n\n def test_collect_cfn_no_path(self):\n cfg.CONF.cfn.path = None\n cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))\n self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)\n self.assertIn('No path configured', self.log.output)\n\n def test_collect_cfn_bad_path(self):\n cfg.CONF.cfn.path = ['foo']\n cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))\n self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)\n self.assertIn('Path not in format', self.log.output)\n\n def test_collect_cfn_no_metadata_url(self):\n cfg.CONF.cfn.metadata_url = None\n cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))\n self.assertRaises(exc.CfnMetadataNotConfigured, cfn_collect.collect)\n self.assertIn('No metadata_url configured', self.log.output)\n\n def test_collect_cfn_missing_sub_path(self):\n cfg.CONF.cfn.path = ['foo.Metadata.not_there']\n cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))\n self.assertRaises(exc.CfnMetadataNotAvailable, 
cfn_collect.collect)\n        self.assertIn('Sub-key not_there does not exist', self.log.output)\n\n    def test_collect_cfn_sub_path(self):\n        cfg.CONF.cfn.path = ['foo.Metadata.map_ab']\n        cfn_collect = cfn.Collector(requests_impl=FakeRequests(self))\n        content = cfn_collect.collect()\n        self.assertThat(content, matchers.IsInstance(dict))\n        self.assertIn(u'b', content)\n        self.assertEquals(u'banana', content[u'b'])\n","repo_name":"SpamapS/os-collect-config","sub_path":"os_collect_config/tests/test_cfn.py","file_name":"test_cfn.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
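The FakeRequests/FakeResponse classes in the test file above are hand-rolled test doubles rather than a mocking library. A minimal standalone sketch of the same pattern with invented names:

class FakeResponse:
    def __init__(self, text):
        self.text = text
    def raise_for_status(self):
        pass  # the double always "succeeds"

class FakeSession:
    def get(self, url, **kwargs):
        return FakeResponse('{"int1": 1}')   # canned payload instead of a network call

def fetch_metadata(session, url):
    resp = session.get(url)
    resp.raise_for_status()
    return resp.text

assert fetch_metadata(FakeSession(), 'http://unused/') == '{"int1": 1}'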
+{"seq_id":"33706946114","text":"# Implement a class Morph that represents a morpheme.\n# The class holds the surface form (surface), base form (base), part of speech (pos), and POS subcategory 1 (pos1) as member variables.\n# Then read the CaboCha parse result (neko.txt.cabocha), represent each sentence as a list of Morph objects,\n# and display the morpheme sequence of the third sentence.\n#\n# cabocha output file format: surface form, POS, POS subcategory 1, POS subcategory 2, POS subcategory 3, conjugation form, conjugation type, base form, reading, pronunciation\n# tabs were replaced with commas beforehand using the sed command\nclass Morph(object):\n    def __init__(self, surface, base, pos, pos1):\n        self.surface = surface\n        self.base = base\n        self.pos = pos\n        self.pos1 = pos1\n\n    def __str__(self):\n        return '表層形: {}, 基本形: {}, 品詞: {}, 品詞細分類1: {}'\\\n            .format(self.surface, self.base, self.pos, self.pos1)\n\n\ndef extract_morph_data(line):\n    m = line.split(',')\n    return [m[0], m[7], m[1], m[2]]\n\n\ndef morpheme_analysis():\n    ret = []\n    with open('neko.txt.cabocha', 'r') as f:\n        ml = []\n        for fl in f:\n            if fl.startswith('*'): continue\n            line = fl.rstrip()\n            if not line == 'EOS':\n                ml.append(Morph(*extract_morph_data(line)))\n            else:\n                if len(ml) > 0:\n                    ret.append(ml)\n                    ml = []\n                else:\n                    pass\n    return ret\n\n\ndef main():\n    morpheme_list = morpheme_analysis()\n    for m in morpheme_list[2]:\n        print(m)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"yusekiya/nlp100_2015","sub_path":"Ch5/ex040.py","file_name":"ex040.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28669590165","text":"\"\"\"\n@Description: Example: Surname Classification with an MLP\n@Author(s): Stephen CUI\n@LastEditor(s): Stephen CUI\n@CreatedTime: 2023-04-28 15:01:05\n\"\"\"\n\n\nfrom torch.utils.data import Dataset\nfrom numpy import ndarray\nfrom pandas import DataFrame\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Type\n\n\nclass SurnameDataset(Dataset):\n    def __getitem__(self, index) -> dict:\n        row = self._target_df_.iloc[index]\n        surname_vector = self._vectorizer.vectorize(row.surname)\n        nationality_index = self._vectorizer.nationality_vocab.lookup_token(\n            row.nationality)\n        return {'x_surname': surname_vector,\n                'y_nationality': nationality_index}\n\n\nclass Vocabulary:\n    pass\n\n\nclass SurnameVectorizer(object):\n    def __init__(self, surname_vocab, nationality_vocab):\n        self.surname_vocab = surname_vocab\n        self.nationality_vocab = nationality_vocab\n\n    def vectorize(self, surname: str) -> ndarray:\n        \"\"\"vectorize the provided surname\n\n        Args:\n            surname (str): the surname\n\n        Returns:\n            ndarray: a collapsed one-hot encoding\n        \"\"\"\n        vocab = self.surname_vocab\n        one_hot = np.zeros(len(vocab), dtype=np.float32)\n        for token in surname:\n            one_hot[vocab.lookup_token(token)] = 1\n        return one_hot\n\n    @classmethod\n    def from_dataframe(cls, surname_df: DataFrame) -> Type['SurnameVectorizer']:\n        surname_vocab = Vocabulary(unk_token=\"@\")\n        nationality_vocab = Vocabulary(add_unk=False)\n\n        for _, row in surname_df.iterrows():\n            for letter in row.surname:\n                surname_vocab.add_token(letter)\n            nationality_vocab.add_token(row.nationality)\n        return cls(surname_vocab, nationality_vocab)\n\n\nfrom torch import Tensor\n\n\nclass SurnameClassifier(nn.Module):\n    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):\n        super(SurnameClassifier, self).__init__()\n        self.fc1 = nn.Linear(input_dim, hidden_dim)\n        self.fc2 = nn.Linear(hidden_dim, output_dim)\n\n    def forward(self, x_in: Tensor, apply_softmax: bool = False) -> Tensor:\n        intermediate_vector = F.relu(self.fc1(x_in))\n        prediction_vector = self.fc2(intermediate_vector)\n        if apply_softmax:\n            prediction_vector = F.softmax(prediction_vector)\n        return prediction_vector\n","repo_name":"JPL-JUNO/NLP","sub_path":"NLPP/Codes/Ch04_Example_1.py","file_name":"Ch04_Example_1.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
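A quick shape check for the SurnameClassifier defined in the record above; the vocabulary, hidden, and class sizes here are hypothetical, and the input stands in for a batch of collapsed one-hot surname vectors:

import torch

model = SurnameClassifier(input_dim=85, hidden_dim=300, output_dim=18)
batch = torch.rand(4, 85)                  # 4 fake one-hot-style surname vectors
logits = model(batch)                      # (4, 18) raw scores
probs = model(batch, apply_softmax=True)   # (4, 18) rows summing to ~1
print(logits.shape, probs.sum(dim=1))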
session\n","repo_name":"thunter009/thought","sub_path":"src/thought/services/instapaper.py","file_name":"instapaper.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37201815724","text":"import re\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom unidecode import unidecode\n\nfrom .base import base\n\nclass scraper(base):\n def __init__(self,wd: webdriver, path_to_umatrix: str):\n super(scraper, self).__init__(\"sherdog\")\n self.wd = wd\n self.wd.install_addon(path_to_umatrix)\n\n def _cards_getter(self):\n for x in range(0, 100):\n self.wd.get(\n f\"https://www.sherdog.com/organizations/Ultimate-Fighting-Championship-UFC-2/recent-events/{x}\")\n site = BeautifulSoup(self.wd.page_source, 'html.parser')\n if site.find('span',class_=\"no_events\"): break\n for line in (site.find('div', {'id': 'recent_tab'})\n .find('tbody').find_all('tr')[::-1]):\n if line.find('a'):\n self.cardsLinks.append(\n \"https://www.sherdog.com\" + line.a['href'])\n self.cardsLinks = list(set(self.cardsLinks))\n\n def _fights_getter(self):\n for cnumber, card in enumerate(self.cardsLinks):\n if not ((len(self.cardsLinks) - cnumber) % 100):\n print(f\"{len(self.size_cards) - cnumber} cards are left\")\n\n self.wd.implicitly_wait(1)\n self.wd.get(card)\n site = BeautifulSoup(self.wd.page_source, 'html.parser')\n if not site.find('div', class_='module fight_card'): continue\n for main_card in (site.find('div', class_='module fight_card')\n .find_all('a')):\n\n if main_card.find('span') and main_card.find(\n 'span').text not in self.fightsLinks:\n self.fightsLinks.append(\"https://www.sherdog.com\" + main_card['href'])\n\n for fight in (site.find('div', class_='content table')\n .find_all('tr', {'itemprop': 'subEvent'})):\n for fighter in (fight\n .find_all('td', {'itemprop': 'performer'})):\n if fighter.a.text not in self.fightsLinks:\n self.fightsLinks.append(\"https://www.sherdog.com\" + fighter.a['href'])\n\n\n def _get_one(self,url):\n def dateDecoder(string):\n months = {'Jan': '01', 'Feb': '02', 'Mar': '03',\n 'Apr': '04', 'May': '05', 'Jun': '06',\n 'Jul': '07', 'Aug': '08', 'Sep': '09',\n 'Oct': '10', 'Nov': '11', 'Dec': '12'}\n new_date = string.split('/')\n return '-'.join([new_date[2].strip(), months[new_date[0].strip()],new_date[1].strip()])\n\n def tostr(elem):\n if type(elem) == list:\n return list(map(str, elem))\n else:\n return str(elem)\n\n fighter = {'Name': '',\n 'Nickname': '',\n 'Birth date': '',\n 'Height': '',\n 'Record': {'wins': '', 'loses': '', 'draws': '',\n 'nc': ''},\n 'Affiliation': '',\n 'Nationality': '',\n 'Location region': '',\n 'Location city': '',\n 'Weight': '',\n 'wins summary': {'KO/TKO': '', 'SUBMISSIONS': '',\n 'DECISIONS': '', 'OTHERS': ''},\n 'loses summary': {'KO/TKO': '', 'SUBMISSIONS': '',\n 'DECISIONS': '', 'OTHERS': ''},\n 'bouts': []}\n\n self.wd.get(url)\n site = BeautifulSoup(self.wd.page_source, 'html.parser')\n\n # fighters summary\n bio = site.find('div', class_='module bio_fighter vcard')\n\n if not site.find('div', class_='module bio_fighter tagged'):\n if len(bio.find('h1').find_all('span')) > 1:\n fighter['Name'], fighter['Nickname'] = \\\n [str(x.text) for x in bio.find('h1').find_all('span')]\n else:\n fighter['Name'] = str(bio.find('h1').find('span').text)\n else:\n fighter['Name'] = str(site.find('span', class_='fn').text)\n fighter['Nickname'] = \\\n str(site.find('span', class_='nickname').text if\n site.find('span', 
class_='nickname') else '')\n\n fighter['Birth date'] = \\\n str(bio.find('span', {'itemprop': 'birthDate'}).text if\n bio.find('span', {'itemprop': 'birthDate'}) else '')\n\n if re.search('(\\d+\\.\\d+) cm',\n bio.find('div', class_='size_info').find('span', class_='item height').text):\n\n fighter['Height'] = \\\n (str(re.search('(\\d+\\.\\d+) cm', bio.find('div', class_='size_info')\n .find('span', class_='item height').text)[1]))\n\n if re.search('(\\d+\\.\\d+) kg',\n bio.find('div', class_='size_info').find('span',\n class_='item weight').text):\n fighter['Weight'] = str(re.search('(\\d+\\.\\d+) kg',\n bio.find('div',\n class_='size_info').find(\n 'span',\n class_='item weight').text)[1])\n fighter['Record']['wins'] = str(\n bio.find('span', class_='counter').text)\n fighter['Record']['loses'] = str(\n bio.find('div', class_='bio_graph loser').find('span',\n class_='counter').text)\n\n if bio.find('div', class_='right_side'):\n for outs in bio.find('div', class_='right_side').find_all('div', class_='bio_graph'):\n if outs.find('span', 'result').text == 'Draws':\n fighter['Record']['draws'] = str(outs.find('span', 'counter').text)\n if outs.find('span', 'result').text == 'N/C':\n fighter['Record']['nc'] = str(outs.find('span', 'counter').text)\n\n bouts_summary = [re.match('\\d+', x.text)[0] for x in bio.find_all('span', class_='graph_tag')]\n if len(bouts_summary) == 8:\n fighter['wins summary']['KO/TKO'], fighter['wins summary']['SUBMISSIONS'], fighter['wins summary'][\n 'DECISIONS'], \\\n fighter['wins summary']['OTHERS'] = tostr(bouts_summary[0:4])\n fighter['loses summary']['KO/TKO'], fighter['loses summary'][\n 'SUBMISSIONS'], fighter['loses summary']['DECISIONS'], \\\n fighter['loses summary']['OTHERS'] = tostr(bouts_summary[4:])\n else:\n fighter['wins summary']['KO/TKO'], fighter['wins summary'][\n 'SUBMISSIONS'], fighter['wins summary'][\n 'DECISIONS'] = tostr(bouts_summary[0:3])\n fighter['loses summary']['KO/TKO'], fighter['loses summary'][\n 'SUBMISSIONS'], fighter['loses summary'][\n 'DECISIONS'] = tostr(bouts_summary[3:])\n\n fighter['Nationality'] = str(bio.find('strong', {\n 'itemprop': 'nationality'}).text if bio.find('strong', {\n 'itemprop': 'nationality'}) else '')\n fighter['Affiliation'] = str(\n bio.find('a', class_='association').text if bio.find('a',\n class_='association') else '')\n\n if bio.find('span', class_='locality'):\n if ',' in bio.find('span', class_='locality').text:\n fighter['Location city'], fighter['Location region'] = tostr(\n bio.find('span', class_='locality').text.split(','))\n else:\n fighter['Location city'] = str(bio.find('span', class_='locality').text)\n\n # fights description\n for history in site.find_all('div', class_='module fight_history'):\n if (re.search('Pro Exhibition', history.find('h2').text,\n re.IGNORECASE) or\n re.search('Amateur', history.find('h2').text,\n re.IGNORECASE) or\n re.search('Upcoming', history.find('h2').text,\n re.IGNORECASE)): continue\n\n for bout in history.find_all('tr')[1:][::-1]:\n bout_info = {'result': '', 'opponent': '', 'event': '',\n 'date': '', 'method': '', 'referee': '',\n 'round': '', 'time': '', 'dc reason': \"\"}\n fight_info = bout.find_all('td')\n bout_info['result'] = str(fight_info[0].text)\n bout_info['opponent'] = str(fight_info[1].text)\n bout_info['event'] = str(fight_info[2].a.text)\n bout_info['date'] = str(dateDecoder(\n fight_info[2].find('span', 'sub_line').text) if\n fight_info[2].find('span',\n 'sub_line') else '')\n bout_info['method'] = str(fight_info[3].contents[0])\n if 
len(fight_info[3].contents) == 2:\n                    pass\n                elif len(fight_info[3].contents[2]) == 1:\n                    bout_info['referee'] = str(\n                        fight_info[3].contents[2].text if\n                        fight_info[3].contents[2] else '')\n                else:\n                    bout_info['referee'] = str(\n                        fight_info[3].contents[4].text if str(\n                            fight_info[3].contents[4]) != 'N/A' else '')\n                    bout_info['dc reason'] = str(\n                        unidecode(fight_info[3].contents[2]))\n\n                bout_info['round'] = str(fight_info[4].text)\n                bout_info['time'] = str(fight_info[5].text)\n                fighter['bouts'].append(bout_info)\n\n        return fighter\n","repo_name":"g13n4/ufcFightersScraper","sub_path":"ufcFightersScraper/base/sherdog.py","file_name":"sherdog.py","file_ext":"py","file_size_in_byte":9909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
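The dateDecoder helper inside _get_one above rearranges Sherdog's month/day/year strings into ISO order. The same mapping restated standalone, with a worked example:

def date_decoder(raw):
    """'Jan / 15 / 2020' -> '2020-01-15' (same conversion as dateDecoder above)."""
    months = {'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04',
              'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08',
              'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
    month, day, year = [part.strip() for part in raw.split('/')]
    return f'{year}-{months[month]}-{day}'

print(date_decoder('Jan / 15 / 2020'))   # 2020-01-15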
71","repo_name":"LaloValle/AnalizadorLex","sub_path":"AnalizadorSintactico.py","file_name":"AnalizadorSintactico.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15867810390","text":"from m5.objects import *\nfrom Benchmarks import *\nfrom m5.util import *\n\nclass CowIdeDisk(PARDg5VIdeDisk):\n image = CowDiskImage(child=RawDiskImage(read_only=True),\n read_only=False)\n def childImage(self, ci):\n self.image.child.image_file = ci\n\nclass MemBus(PARDSystemXBar):\n badaddr_responder = BadAddr()\n default = Self.badaddr_responder.pio\n\ndef x86IOAddress(port):\n IO_address_space_base = 0x8000000000000000\n return IO_address_space_base + port\n\ndef connectX86ClassicSystem(x86_sys, numCPUs):\n # Constants similar to x86_traits.hh\n IO_address_space_base = 0x8000000000000000\n pci_config_address_space_base = 0xc000000000000000\n interrupts_address_space_base = 0xa000000000000000\n APIC_range_size = 1 << 12;\n\n x86_sys.membus = MemBus()\n\n # North Bridge\n x86_sys.iobus = PARDg5VIOHub()\n x86_sys.bridge = Bridge(delay='50ns')\n x86_sys.iobus.attachRemappedMaster(x86_sys.bridge)\n x86_sys.bridge.slave = x86_sys.membus.io_port\n # Allow the bridge to pass through:\n # 1) kernel configured PCI device memory map address: address range\n # [0xC0000000, 0xFFFF0000). (The upper 64kB are reserved for m5ops.)\n # 2) the bridge to pass through the IO APIC (two pages, already contained in 1),\n # 3) everything in the IO address range up to the local APIC, and\n # 4) then the entire PCI address space and beyond.\n x86_sys.bridge.ranges = \\\n [\n AddrRange(0xC0000000, 0xFFFF0000),\n AddrRange(IO_address_space_base,\n interrupts_address_space_base - 1),\n AddrRange(pci_config_address_space_base,\n Addr.max)\n ]\n\n x86_sys.membus.io_ranges = \\\n [\n AddrRange(0xC0000000, 0xFFFF0000),\n AddrRange(IO_address_space_base,\n interrupts_address_space_base - 1),\n AddrRange(pci_config_address_space_base,\n Addr.max)\n ]\n x86_sys.membus.memory_ranges = [AddrRange(0, 0xBFFFFFFF)]\n\n # Create a bridge from the IO bus to the memory bus to allow access to\n # the local APIC (two pages)\n x86_sys.apicbridge = Bridge(delay='50ns')\n x86_sys.apicbridge.slave = x86_sys.iobus.master\n x86_sys.apicbridge.master = x86_sys.membus.slave\n x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,\n interrupts_address_space_base +\n numCPUs * APIC_range_size\n - 1)]\n\n # connect the io bus\n x86_sys.cellx.attachIO(x86_sys.iobus)\n\n x86_sys.system_port = x86_sys.membus.slave\n\n\ndef makePARDg5VSystem(mem_mode, numCPUs = 1, mdesc = None):\n self = PARDg5VSystem()\n\n if not mdesc:\n # generic system\n mdesc = SysConfig()\n self.readfile = mdesc.script()\n\n self.mem_mode = mem_mode\n self.mem_ranges = [AddrRange(mdesc.mem())]\n\n # Platform\n self.cellx = CellX()\n\n # Create and connect the busses required by each memory system\n connectX86ClassicSystem(self, numCPUs)\n\n self.intrctrl = IntrControl()\n\n # IDE-0\n ide0_disk0 = CowIdeDisk(driveID='master')\n ide0_disk0.childImage(mdesc.disk())\n self.cellx.ide0 = PARDg5VIdeController(\n disks=[ide0_disk0],\n pci_func=0, pci_dev=6, pci_bus=0,\n InterruptLine = 10,\n InterruptPin = 1)\n self.cellx.ide0.pio = self.iobus.master\n self.cellx.ide0.config = self.iobus.master\n self.cellx.ide0.dma = self.iobus.slave\n\n # IDE-1\n ide1_disk0 = CowIdeDisk(driveID='master')\n ide1_disk0.childImage(mdesc.disk())\n self.cellx.ide1 = PARDg5VIdeController(\n 
+{"seq_id":"15867810390","text":"from m5.objects import *\nfrom Benchmarks import *\nfrom m5.util import *\n\nclass CowIdeDisk(PARDg5VIdeDisk):\n    image = CowDiskImage(child=RawDiskImage(read_only=True),\n                         read_only=False)\n    def childImage(self, ci):\n        self.image.child.image_file = ci\n\nclass MemBus(PARDSystemXBar):\n    badaddr_responder = BadAddr()\n    default = Self.badaddr_responder.pio\n\ndef x86IOAddress(port):\n    IO_address_space_base = 0x8000000000000000\n    return IO_address_space_base + port\n\ndef connectX86ClassicSystem(x86_sys, numCPUs):\n    # Constants similar to x86_traits.hh\n    IO_address_space_base = 0x8000000000000000\n    pci_config_address_space_base = 0xc000000000000000\n    interrupts_address_space_base = 0xa000000000000000\n    APIC_range_size = 1 << 12;\n\n    x86_sys.membus = MemBus()\n\n    # North Bridge\n    x86_sys.iobus = PARDg5VIOHub()\n    x86_sys.bridge = Bridge(delay='50ns')\n    x86_sys.iobus.attachRemappedMaster(x86_sys.bridge)\n    x86_sys.bridge.slave = x86_sys.membus.io_port\n    # Allow the bridge to pass through:\n    # 1) kernel configured PCI device memory map address: address range\n    # [0xC0000000, 0xFFFF0000). (The upper 64kB are reserved for m5ops.)\n    # 2) the bridge to pass through the IO APIC (two pages, already contained in 1),\n    # 3) everything in the IO address range up to the local APIC, and\n    # 4) then the entire PCI address space and beyond.\n    x86_sys.bridge.ranges = \\\n        [\n        AddrRange(0xC0000000, 0xFFFF0000),\n        AddrRange(IO_address_space_base,\n                  interrupts_address_space_base - 1),\n        AddrRange(pci_config_address_space_base,\n                  Addr.max)\n        ]\n\n    x86_sys.membus.io_ranges = \\\n        [\n        AddrRange(0xC0000000, 0xFFFF0000),\n        AddrRange(IO_address_space_base,\n                  interrupts_address_space_base - 1),\n        AddrRange(pci_config_address_space_base,\n                  Addr.max)\n        ]\n    x86_sys.membus.memory_ranges = [AddrRange(0, 0xBFFFFFFF)]\n\n    # Create a bridge from the IO bus to the memory bus to allow access to\n    # the local APIC (two pages)\n    x86_sys.apicbridge = Bridge(delay='50ns')\n    x86_sys.apicbridge.slave = x86_sys.iobus.master\n    x86_sys.apicbridge.master = x86_sys.membus.slave\n    x86_sys.apicbridge.ranges = [AddrRange(interrupts_address_space_base,\n                                           interrupts_address_space_base +\n                                           numCPUs * APIC_range_size\n                                           - 1)]\n\n    # connect the io bus\n    x86_sys.cellx.attachIO(x86_sys.iobus)\n\n    x86_sys.system_port = x86_sys.membus.slave\n\n\ndef makePARDg5VSystem(mem_mode, numCPUs = 1, mdesc = None):\n    self = PARDg5VSystem()\n\n    if not mdesc:\n        # generic system\n        mdesc = SysConfig()\n    self.readfile = mdesc.script()\n\n    self.mem_mode = mem_mode\n    self.mem_ranges = [AddrRange(mdesc.mem())]\n\n    # Platform\n    self.cellx = CellX()\n\n    # Create and connect the busses required by each memory system\n    connectX86ClassicSystem(self, numCPUs)\n\n    self.intrctrl = IntrControl()\n\n    # IDE-0\n    ide0_disk0 = CowIdeDisk(driveID='master')\n    ide0_disk0.childImage(mdesc.disk())\n    self.cellx.ide0 = PARDg5VIdeController(\n        disks=[ide0_disk0],\n        pci_func=0, pci_dev=6, pci_bus=0,\n        InterruptLine = 10,\n        InterruptPin = 1)\n    self.cellx.ide0.pio = self.iobus.master\n    self.cellx.ide0.config = self.iobus.master\n    self.cellx.ide0.dma = self.iobus.slave\n\n    # IDE-1\n    ide1_disk0 = CowIdeDisk(driveID='master')\n    ide1_disk0.childImage(mdesc.disk())\n    self.cellx.ide1 = PARDg5VIdeController(\n        disks=[ide1_disk0],\n        pci_func=0, pci_dev=7, pci_bus=0,\n        InterruptLine = 11,\n        InterruptPin = 1)\n    self.cellx.ide1.pio = self.iobus.master\n    self.cellx.ide1.config = self.iobus.master\n    self.cellx.ide1.dma = self.iobus.slave\n\n    # IDE-2\n    ide2_disk0 = CowIdeDisk(driveID='master')\n    ide2_disk0.childImage(mdesc.disk())\n    self.cellx.ide2 = PARDg5VIdeController(\n        disks=[ide2_disk0],\n        pci_func=0, pci_dev=8, pci_bus=0,\n        InterruptLine = 12,\n        InterruptPin = 1)\n    self.cellx.ide2.pio = self.iobus.master\n    self.cellx.ide2.config = self.iobus.master\n    self.cellx.ide2.dma = self.iobus.slave\n\n    # IDE-3\n    ide3_disk0 = CowIdeDisk(driveID='master')\n    ide3_disk0.childImage(mdesc.disk())\n    self.cellx.ide3 = PARDg5VIdeController(\n        disks=[ide3_disk0],\n        pci_func=0, pci_dev=9, pci_bus=0,\n        InterruptLine = 13,\n        InterruptPin = 1)\n    self.cellx.ide3.pio = self.iobus.master\n    self.cellx.ide3.config = self.iobus.master\n    self.cellx.ide3.dma = self.iobus.slave\n\n    self.boot_osflags = 'earlyprintk=ttyS0 console=ttyS0 lpj=7999923 ' + \\\n                        'root=/dev/sda1'\n    self.kernel = binary('x86_64-vmlinux-2.6.28.4')\n\n    return self\n","repo_name":"LvNA-system/PARD-gem5","sub_path":"configs/common/XFSConfig.py","file_name":"XFSConfig.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"3"} +{"seq_id":"42784542103","text":"import requests\nfrom io import StringIO\nimport pandas as pd\nimport numpy as np\n\nimport datetime\nimport time\n\ndef crawl_all(date):\n    r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + str(date).split(' ')[0].replace('-','') + '&type=ALL')\n    print(r.text)\n    ret = pd.read_csv(StringIO(\"\\n\".join([i.translate({ord(c): None for c in ' '}) \n                                            for i in r.text.split('\\n') \n                                            if len(i.split('\",')) == 17 and i[0] != '='])), header=0)\n    print(ret)\n    ret = ret.set_index('證券代號')\n    ret['成交金額'] = ret['成交金額'].str.replace(',','')\n    ret['成交股數'] = ret['成交股數'].str.replace(',','')\n    return ret\n\ndef crawl_stock(date, stockNo):\n    r = requests.post('http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=csv&date=' + str(date) + '&stockNo=' + str(stockNo))\n    # print(r.text)\n    ret = pd.read_csv(StringIO(\"\\n\".join([i.translate({ord(c): None for c in ' '}) \n                                            for i in r.text.split('\\n')\n                                            if len(i.split('\",')) == 10 and i[0] != '='])), header=0)\n    \n    #ret = pd.read_csv(r.text)\n    # print(ret)\n    ret = ret.set_index('日期')\n    ret['成交金額'] = ret['成交金額'].str.replace(',','')\n    ret['成交股數'] = ret['成交股數'].str.replace(',','')\n    return ret\n\"\"\"\n#crawl data for every stock\n#test = crawl_all(\"2020-02-21 11:38:35.231800\")\n\n#crawl a specific date, specific stock\ntest = crawl_stock(\"20200101\",\"2330\")\ndf = pd.DataFrame(test)\n\n#all columns:\n#df.index[0] \n#date \n#df.iloc[:,0] to df.iloc[:,7]\n#shares traded, trade value, open, high, low, close, price change, number of trades\nprint(df)\n\n#closing price for a given day\nprint(str(df.index[0]) + \"日, 成交張數=\" + str(int(df.iloc[0,0])/1000) +\", 收盤價=\" + str(df.iloc[0,5]))\n\n\n#to-do: indicators, trends, weekly/monthly candlesticks\n\"\"\"","repo_name":"YooooR/Stock_Backtesting","sub_path":"getStock.py","file_name":"getStock.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38166199462","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport numpy as np\n\nfrom models.losses import FocalLoss, RegL1Loss, RegLoss, RegWeightedL1Loss,th_mdn_loss_ind,th_mdn_loss_dense\nfrom models.utils import 
_tranpose_and_gather_feat\nfrom models.decode import multi_pose_decode\nfrom models.utils import _sigmoid, flip_tensor, flip_lr_off, flip_lr\nfrom utils.debugger import Debugger\nfrom utils.post_process import multi_pose_post_process\nfrom utils.oracle_utils import gen_oracle_map\nfrom .base_trainer import BaseTrainer\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport os\n\ndef add_cbar(fig,ax,_im):\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(_im, cax=cax)\n\nIND_TO_KP={0:'nose',1:'l_eye',2:'r_eye',3:'l_ear',4:'r_ear',\n 5:'l_shoulder',6:'r_shoulder',7:'l_elbow',8:'r_elbow',\n 9:'l_wrist',10:'r_wrist',11:'l_hip',12:'r_hip',\n 13:'l_knee',14:'r_knee',15:'l_ankle',16:'r_ankle'}\n \nclass MultiPoseLoss(torch.nn.Module):\n def __init__(self, opt):\n super(MultiPoseLoss, self).__init__()\n self.crit = FocalLoss()\n self.crit_hm_hp = FocalLoss()\n if opt.mdn:\n self.crit_kp = th_mdn_loss_dense if opt.dense_hp else \\\n th_mdn_loss_ind\n else:\n self.crit_kp = torch.nn.L1Loss(reduction='sum') if opt.dense_hp else \\\n RegWeightedL1Loss()\n self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \\\n RegLoss() if opt.reg_loss == 'sl1' else None\n self.opt = opt\n\n def forward(self, outputs, batch,global_step,tb_writer):\n opt = self.opt\n hm_loss, wh_loss, off_loss = 0, 0, 0\n hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0\n\n loss_stats = {}\n for s in range(opt.num_stacks):\n output = outputs[s]\n output['hm'] = _sigmoid(output['hm'])\n if opt.hm_hp:\n output['hm_hp'] = _sigmoid(output['hm_hp'])\n \n if opt.eval_oracle_hmhp:\n output['hm_hp'] = batch['hm_hp']\n if opt.eval_oracle_hm:\n output['hm'] = batch['hm']\n if opt.eval_oracle_kps:\n if opt.dense_hp:\n output['hps'] = batch['dense_hps']\n else:\n output['hps'] = torch.from_numpy(gen_oracle_map(\n batch['hps'].detach().cpu().numpy(), \n batch['ind'].detach().cpu().numpy(), \n opt.output_res, opt.output_res)).to(opt.device)\n if opt.eval_oracle_hp_offset:\n output['hp_offset'] = torch.from_numpy(gen_oracle_map(\n batch['hp_offset'].detach().cpu().numpy(), \n batch['hp_ind'].detach().cpu().numpy(), \n opt.output_res, opt.output_res)).to(opt.device)\n\n if opt.mdn:\n V=torch.Tensor((np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0).astype(np.float32)).float().cuda()\n\n hm_loss += self.crit(output['hm'], batch['hm'])[0] / opt.num_stacks\n\n if opt.mdn:\n mdn_logits = output['mdn_logits']\n #mdn_logits.shape: torch.Size([2, 3, 128, 128])\n if opt.mdn_dropout > 0 and opt.epoch 0:\n wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],\n batch['ind'], batch['wh'])[0] / opt.num_stacks\n if opt.reg_offset and opt.off_weight > 0:\n off_loss += self.crit_reg(output['reg'], batch['reg_mask'],\n batch['ind'], batch['reg'])[0] / opt.num_stacks\n if opt.reg_hp_offset and opt.off_weight > 0:\n hp_offset_loss += self.crit_reg(\n output['hp_offset'], batch['hp_mask'],\n batch['hp_ind'], batch['hp_offset'])[0] / opt.num_stacks\n if opt.hm_hp and opt.hm_hp_weight > 0:\n hm_hp_loss += self.crit_hm_hp(\n output['hm_hp'], batch['hm_hp'])[0] / opt.num_stacks\n\n loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \\\n opt.off_weight * off_loss + opt.hp_weight * hp_loss + \\\n opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss\n \n loss_stats.update({'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss, \n 'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': 
hp_offset_loss,\n 'wh_loss': wh_loss, 'off_loss': off_loss})\n return loss, loss_stats\n\nclass MultiPoseTrainer(BaseTrainer):\n def __init__(self, opt, model, optimizer=None):\n super(MultiPoseTrainer, self).__init__(opt, model, optimizer=optimizer)\n \n def _get_losses(self, opt):\n loss_states = ['loss', 'hm_loss', 'hp_loss', 'hm_hp_loss', \n 'hp_offset_loss', 'wh_loss', 'off_loss']\n loss = MultiPoseLoss(opt)\n return loss_states, loss\n\n def debug(self, batch, output, iter_id):\n opt = self.opt\n reg = output['reg'] if opt.reg_offset else None\n hm_hp = output['hm_hp'] if opt.hm_hp else None\n hp_offset = output['hp_offset'] if opt.reg_hp_offset else None\n dets = multi_pose_decode(\n output['hm'], output['wh'], output['hps'], \n reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=opt.K)\n dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])\n\n dets[:, :, :4] *= opt.input_res / opt.output_res\n dets[:, :, 5:39] *= opt.input_res / opt.output_res\n dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])\n dets_gt[:, :, :4] *= opt.input_res / opt.output_res\n dets_gt[:, :, 5:39] *= opt.input_res / opt.output_res\n for i in range(1):\n debugger = Debugger(\n dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)\n img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)\n img = np.clip(((\n img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)\n pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())\n gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())\n debugger.add_blend_img(img, pred, 'pred_hm')\n debugger.add_blend_img(img, gt, 'gt_hm')\n\n debugger.add_img(img, img_id='out_pred')\n for k in range(len(dets[i])):\n if dets[i, k, 4] > opt.center_thresh:\n debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],\n dets[i, k, 4], img_id='out_pred')\n debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred')\n\n debugger.add_img(img, img_id='out_gt')\n for k in range(len(dets_gt[i])):\n if dets_gt[i, k, 4] > opt.center_thresh:\n debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],\n dets_gt[i, k, 4], img_id='out_gt')\n debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt')\n\n if opt.hm_hp:\n pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())\n gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())\n debugger.add_blend_img(img, pred, 'pred_hmhp')\n debugger.add_blend_img(img, gt, 'gt_hmhp')\n\n if opt.debug == 4:\n debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))\n else:\n debugger.show_all_imgs(pause=True)\n\n def save_result(self, output, batch, results):\n reg = output['reg'] if self.opt.reg_offset else None\n hm_hp = output['hm_hp'] if self.opt.hm_hp else None\n hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None\n dets = multi_pose_decode(\n output['hm'], output['wh'], output['hps'], \n reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)\n dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])\n \n dets_out = multi_pose_post_process(\n dets.copy(), batch['meta']['c'].cpu().numpy(),\n batch['meta']['s'].cpu().numpy(),\n output['hm'].shape[2], output['hm'].shape[3])\n results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]","repo_name":"alivaramesh/MixtureDenseRegression","sub_path":"src/lib/trains/multi_pose.py","file_name":"multi_pose.py","file_ext":"py","file_size_in_byte":10730,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"3"} 
+{"seq_id":"20855791316","text":"import torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.transforms as transforms\n\nimport numpy as np\nimport os\nfrom PIL import Image\n\n\nclass Monet2Photo(Dataset):\n def __init__(self, dataset='monet2photo', mode='train'):\n super(Monet2Photo, self).__init__()\n\n self.transforms = transforms.Compose([\n transforms.Resize(286),\n transforms.RandomCrop(256),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),\n ])\n\n self.base_dir = './dataset/{}'.format(dataset)\n\n self.A_path = os.path.join(self.base_dir, '{}A'.format(mode))\n self.A_list = os.listdir(self.A_path)\n\n self.B_path = os.path.join(self.base_dir, '{}B'.format(mode))\n self.B_list = os.listdir(self.B_path)\n\n def __getitem__(self, index):\n img_A = Image.open(os.path.join(self.A_path, self.A_list[index]))\n img_B = Image.open(os.path.join(self.B_path, self.B_list[index]))\n\n if self.transforms is not None:\n img_A = self.transforms(img_A)\n img_B = self.transforms(img_B)\n\n data = {\n 'A': img_A,\n 'pathA': os.path.join(self.A_path, self.A_list[index]),\n 'B': img_B,\n 'pathB': os.path.join(self.B_path, self.B_list[index])\n }\n\n return data\n\n def __len__(self):\n if len(self.A_list) < len(self.B_list):\n return len(self.A_list)\n return len(self.B_list)\n\n\ndef load_data(args):\n train_data = Monet2Photo()\n train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n\n test_data = Monet2Photo(mode='test')\n test_loader = DataLoader(test_data, batch_size=args.batch_size, num_workers=args.num_workers)\n\n return train_loader, test_loader\n\n\ndef main():\n\n from config import load_args\n args = load_args()\n load_data(args)\n\n\n# if __name__ == '__main__':\n# main()","repo_name":"leaderj1001/minimal-cyclegan","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"34756290994","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2022/2/18 15:24\n# @Author : Wang Zixv\n# @Site : \n# @File : get_line_hog.py.py\n# @Software: PyCharm\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# Set the default figure size\nplt.rcParams['figure.figsize'] = [17.0, 7.0]\n\n# Load the image\nimage = cv2.imread('./images/triangle_tile.jpeg')\n\n# Convert the original image to RGB\noriginal_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# Convert the original image to gray scale\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Print the shape of the original and gray scale images\nprint('The original image has shape: ', original_image.shape)\nprint('The gray scale image has shape: ', gray_image.shape)\n\n# Display the images\nplt.subplot(121)\nplt.imshow(original_image)\nplt.title('Original Image')\nplt.subplot(122)\nplt.imshow(gray_image, cmap='gray')\nplt.title('Gray Scale Image')\nplt.show()\n","repo_name":"PepperTree-wang/python_tools","sub_path":"mini_model_test/get_y_line_hog/get_line_hog.py","file_name":"get_line_hog.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25470217145","text":"# desenvolvido por João Paulo de Souza e Leandro Souza Pinheiro\r\n\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\nimport tictactoe\r\n\r\ntk = Tk()\r\ntk.title(\"Jogo da 
velha\")\r\nttt = tictactoe.TicTacToe()\r\nbclick = True\r\nflag = 0\r\nmessage = ''\r\nbuttons = []\r\n\r\n# método para desabilitar o clique de um botão\r\ndef disableButton(button):\r\n button.configure(state=DISABLED)\r\n\r\n# método que desabilita o clique de todos os botões com\r\n# exceção do reset\r\ndef disableAllButtons():\r\n for i in buttons:\r\n disableButton(i)\r\n\r\n# método para pegar o botão clicado e realizar a escrita em tela\r\ndef btnClick(button, position):\r\n # define que vai fazer uso das variaveis globais\r\n global bclick, flag\r\n # verifica se o texto do botão está vazio e caso esteja com a flag de clique como\r\n # true, indica que pode ser clicado e que o valor deve ser X\r\n if button[\"text\"] == \" \" and bclick == True:\r\n # seta o texto do botão com X\r\n button[\"text\"] = \"X\"\r\n # troca o valor da flag de clique para que venha o O\r\n bclick = False\r\n # soma a quantidade de botões clicados\r\n flag += 1\r\n # pega a mensagem de retorno do prolog caso exista\r\n message = ttt.pl('x', position[0], position[1])\r\n # da mesma forma, caso o texto do botão esteja vazio e a flag de clique\r\n # esteja como false\r\n elif button[\"text\"] == \" \" and bclick == False:\r\n # seta o texto O no botão\r\n button[\"text\"] = \"O\"\r\n # troca a flag de clique\r\n bclick = True\r\n # soma o botão clicado\r\n flag += 1\r\n # e pega a mensagem do prolog caso exista\r\n message = ttt.pl('o', position[0], position[1])\r\n\r\n # se a mensagem for diferente de none, indica que alguem\r\n # ganhou ou deu empate\r\n if message is not None:\r\n # então mostra na tela e bloqueia os botões\r\n tkinter.messagebox.showinfo(\"Jogo da velha\", message)\r\n disableAllButtons()\r\n \r\n disableButton(button)\r\n\r\n# método para o reset\r\ndef reset():\r\n bclick = True\r\n # recria o tabuleiro\r\n createBoard()\r\n # chama o método de jogada no prolog com a flag de reset como 1, ou seja, indicando o reset\r\n ttt.pl('n', 0, 0, 1)\r\n\r\n# método para criar os botões e o tabuleiro\r\ndef createBoard():\r\n button1 = Button(tk, text=\" \", font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button1, (0,0)))\r\n button1.grid(row=3, column=0)\r\n buttons.append(button1)\r\n\r\n button2 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button2, (0, 1)))\r\n button2.grid(row=3, column=1)\r\n buttons.append(button2)\r\n\r\n button3 = Button(tk, text=' ',font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button3, (0, 2)))\r\n button3.grid(row=3, column=2)\r\n buttons.append(button3)\r\n\r\n button4 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button4, (1, 0)))\r\n button4.grid(row=4, column=0)\r\n buttons.append(button4)\r\n\r\n button5 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button5, (1, 1)))\r\n button5.grid(row=4, column=1)\r\n buttons.append(button5)\r\n\r\n button6 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button6, (1, 2)))\r\n button6.grid(row=4, column=2)\r\n buttons.append(button6)\r\n\r\n button7 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button7, (2, 0)))\r\n button7.grid(row=5, column=0)\r\n buttons.append(button7)\r\n\r\n button8 = Button(tk, 
text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button8, (2, 1)))\r\n button8.grid(row=5, column=1)\r\n buttons.append(button8)\r\n\r\n button9 = Button(tk, text=' ', font='Arial 20 bold', bg='#0f4e63', fg='white', height=4, width=8, command=lambda: btnClick(button9, (2, 2)))\r\n button9.grid(row=5, column=2)\r\n buttons.append(button9)\r\n\r\n button10 = Button(tk, text='Reset', font='Arial 20 bold', bg='#0f4e63', fg='white', height=1, width=28, command=lambda: reset())\r\n button10.grid(row=7,column=0, columnspan=3)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n createBoard()\r\n tk.mainloop()\r\n\r\n","repo_name":"weth767/tictactoe_prolog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7500541185","text":"import logging\nlogging.basicConfig(level=logging.DEBUG, format='%(message)s')\n\ndef calculator():\n action_type = int(input(\"Podaj działanie, posługując się odpowiednią liczbą: 1 Dodawanie, 2 Odejmowanie, 3 Mnożenie, 4 Dzielenie:\"))\n num_one = float(input('Podaj składnik 1:'))\n num_two = float(input('Podaj składnik 2:'))\n if action_type == 1:\n variable_name = \"Dodaję\"\n variableone = logging.info('%s %s i %s', variable_name, num_one, num_two)\n result = num_one + num_two\n elif action_type == 2:\n variable_name = \"Odejmuję\"\n variableone = logging.info('%s %s i %s', variable_name, num_one, num_two)\n result = num_one - num_two\n elif action_type == 3:\n variable_name = \"Mnożę\"\n variableone = logging.info('%s %s i %s', variable_name, num_one, num_two)\n result = num_one * num_two\n else:\n variable_name = \"Dzielę\"\n variableone = logging.info('%s %s i %s', variable_name, num_one, num_two)\n result = num_one / num_two\n\n print(\"Wynik to %s\" % result)\ncalculator()","repo_name":"LukaszRadominski/Calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13856061528","text":"#-*- coding:utf-8 -*-\nimport random\nfrom sys import version_info\nprint(random.randint(0,2))#get random int value,range is 0,1,2\n'''\n树莓派:微型电脑,麻雀虽小五脏俱全,电脑的各种硬件它全有,可以弄来耍,淘宝上百来块钱\n\n\n\n简单介绍shell脚本:\n创建test.sh文件,在test.sh文件里写几个shell命令,例如:\nls\npwd\nls /\n保存test.sh\n给test.sh添加可执行权限:chmod u+x test.sh\n此时这个shell脚本test.sh就可执行了:test.sh+Enter --> 会依次执行里面的那几个命\n\n\n\n\n\npython与java和C语言等编程语言对比:\nC语言和java会先编译成二进制文件,再执行;\npython不用编译直接执行:其实python也编译了,只不过是编译一行执行一行,类似英语行业的同声传译\npython是胶水语言,因为虽然python写代码的时候快,但是运行的时候相对较慢,可与C语言或java等混合使用,以弥补运行慢的缺点\npython代码是动态的,���到运行到了,才知道要执行什么代码,参考下面的‘通过input获取匿名函数’案例\n\n\npython运行代码--> python x.py\npython+Enter --> 进入python2的交互模式,也会显示python的版本信息;在交互模式里可以写python语法,注意在交互模式写变量名则是打印变量,exit()退出交互模式。\nipython+Enter --> 进入python2的高级交互模式,推荐的交互模式,与普通交互模式对比:可以写shell命令且支持自动补全;exit退出,注意不是exit()。\npython3运行代码-->python3 x.py\npython3+Enter --> 进入python3的交互模式。\nipthon3+Enter --> 进入python3的高级交互模式,参考python2的高级交互模式。推荐的交互模式。\n运行python代码的另一种方式,在代码第一行写入执行时的python解释器路径(#!/user/bin/python),编辑完后需要对此python文件添加'x'权限。\n这样在执行.py时,只需./hello.py然后Enter即可执行。\n\n\n\n\npython2和python3的区别:\n1:\npython2有中文问题,无论中文出现在注释里还是代码里,用python2运行都会报错;\npython3不存在中文问题;\n解决方案有两种:推荐第二种方案\n#coding=utf-8\n#-*- coding:utf-8 -*- \n将上面任意一种解决方案放到代码的第一行即可。\n2:\n注意:input方法python2和python3完全不同;参考'通过input获取匿名函数'案例\npython3中,是指以字符串的方式返回用户的输入结果;\n但是在python2中,则把用户输入的内容作为python语法语句返回,\n例如:a = 
input(\"请输入\"),如果用户输入了hello,则实际效果是hello以表达式的方式赋值给a,即a=hello,因此会因为hell未定义报错\n再例如,如果用户输入1+2,则a=3,会把1+2计算后返回。\n其实在python2中并不用input接收用户输入,而是使用raw_input(\"请输入\")代替。\n3:\n<> not equal,python2 have it,but python3 don't have it.\n4:\nget all keys composed collection\npython2 return:['name','age','addr']\npython3 return:dict_keys(['name','age','addr']),infact is object\nget all values composed collection\npython2 return:['18','jack','ah']\npython3 retrun:dict_values(['18','jack','ah']),infacit is object\n5:\nprint\"this is python2 print\"\n python2's print method no need bracket,python3 must use bracket.\n6:\nsuper:python2-->super(SonClassName,self);python3-->super() no para\n7:\n同时捕获多个异常:\npython2:Error1,Error2;\npython3:(Error1,Error2),in fact is a tuple.\n任何异常都捕获:\npython2:except: that's all.but except Exception is OK? take care about it.\npython3:except Exception.\n8:\npython2:range(1,4),如果你print(range(1,4))会输出[1,2,3],因此range(1,10000000000)会内存溢出\npython3:range(1,4),如果你print(range(1,4))会输出'range(1,4)',因此range(1,10000000000)不会内存溢出\n实际上,python3对此做了优化,当你来取的时候我才给你值,取一个给一个,防止内存溢出:\nrange(1,5)[2]可以取出,list = range(1,5),list[0]也可以取出\n\n\n\n查看python都有哪些关键字:\nipython3交互模式下:\n第一步:import keyword 导入keywor包\n第二步:keyword.kwlist 调用包里的内容,查看所有关键字\n\n'''\n\n\n\n'''\n数据类型参考文档\npython是弱类型语言,类似js,无需声明变量类型,系统会根据你给定的值,自动确定类型\ntype(变量):返回当前变量的数据类型,看下面的例子,也可以用isinstance(instance,class)代替,eg:isinstance(123,int)\nstr转换为int:b=int(a),数据类型转换参考文档\nint转换未str:b=str(a) string = str(任何类型)\npython格式化输出,参考文档:百度网盘-->前段-在学-->前段工具-->Linux\n\nNone just like java's null.eg:obj == None;obj = None\n\n\"\" None 0 [] {} () --> False \n'''\n#基本类型\nprint(type(123)==int)\nprint(type(\"123\")==str)\nprint(type([])==list)\nprint(type((1,3,4))==tuple)\nprint(type({})==dict)\n#对象类型,使用types中的常亮\nprint(\"#############类型##########\")\nimport types\ndef mytp():\n\tpass\nprint(type(mytp)==types.FunctionType)\nprint(type(abs)==types.BuiltinFunctionType)\nprint(type(lambda x: x)==types.LambdaType)\nprint(type((x for x in range(10)))==types.GeneratorType)\n\n\n#print\"this is python2 print\" python2's print method no need bracket\nname = \"张学友\"\nage=19\nadd = \"成都\"\nbfloat = 3.14\nbooy = True\nprint(\"输出变量age=%d\"%age)\nprint(\"多种类型同时输出:%s%d%s\"%(name,age,add))\nprint(type(age))\nprint(type(add))\nprint(type(bfloat))\nprint(type(booy))\nstrvalue = \"23\"\nintvalue = int(strvalue)#字符串类型的数字,转为int类型的数\n\nprint(\"字符串转为数字:%d\"%intvalue)\n\n\n\n\n\n'''\n接收用户输入,返回字符串结果\n注意:input方法python2和python3完全不同;\npython3中,是指以字符串的方式返回用户的输入结果;\n但是在python2中,则把用户输入的内容作为python语法语句返回,\n例如:a = input(\"请输入\"),如果用户输入了hello���则实际效果是hello以表达式的方式赋值给a,即a=hello,因此会因为hell未定义报错\n再例如,如果用户输入1+2,则a=3,会把1+2计算后返回。\n其实在python2中并不用input接收用户输入,而是使用raw_input(\"请输入\")代替。\n'''\n#result = input(\"请输入您的姓名:\")\n#print(\"接受输入结果:%s\"%result)\nprint(age)#can output variable directly\n#print(\"any-content\",end=\"\")#line tip end with \"\",instead of line feed,in fact end=\"anycontent\" is ok,python2 can not use end.\nprint(\"\")#line feed,start a new line\n\n\n'''\npython语言中一般不用{}大括号区分代码块,因此强制使用tab键区分代码块,例如if语句中,if后面联系几个tab开头的语句都是if语句的内容,直到某个\n不是tab开头的语句才结束,elif和else语句也是一样\nin python have no do-while,switch-case\n'''\nif age<18:\n\tprint(\"我未成年\")\n\tprint(\"你呢\")\nelif age>18:\n\tprint(\"我成年了\")\n\tprint(\"你呢\")\nelif age>100:\n\tpass#pass类似于java的todo,暂时不写代码,先放在这里占个位置\nelse:\n\tprint(\"我刚好18岁\")\n\tprint(\"我是大人了\")\nprint(\"我不属于if-else\")\n\n\n\n'''\n= 赋值符号,在python中,该符号永远是指左边的变量指向右边的表达式所对应的内存区域,例如:a=10,a指向10的内存区域;b=\"hello\",b指向'hello'的内存区域;c=[1,2],c指向[1,2]的内存区域。\n+ 加法\n- 减法\n* 
乘法,数学乘法和字符串乘法,所谓字符串乘法:\"H\"*5=\"HHHHH\"\n/ 除法,返回实际的数学结果,例如:5/2=2.5\n// 除法,返回商,例如:5//2=2\n% 除法,返回余数,例如:5%2=1\n** 求次方,例如2**2=2的2次方,2**3等于2的3次方,2××100=2的100次方\n\na+=1 just like a=a+1,但是二者是有区别的,当a指向可变类型时,例如a=[0],则a+=[1,2]是对a原来指向的[0]内存区域进行修改,a的指向不变;而a=a+[1,2]是开辟新的内存区域存放a+[1,2,],然后让a指向的这块新的内存区域,即a的指向改变。其他-= *= /=等等也是同样的道理\n-=\n*=\n/=\n//=\n%=\n**=\nbut attention this:\na*=2+4+6 just like a=a*(2+4+6) not like a=(a*2+4+6)\nin python have no a++,a--\n\n> greater-than sign \n< less-than sign\n>= greater-equal sign \n<= less-equal sign\n!= not equal\n<> not equal,python2 have it,but python3 don't have it.\n== equal\n\n\nor just like java's ||\nand just like java's &&\nnot just like java's !\n\n\n\n'''\nprint(\"字符串乘法:\"+\"H\"*5)\n\namulti = 2\namulti*=2+4+6\nprint(amulti)#a=a*(2+4+6)\n\naoan = \"a\"\nboan = \"b\"\ncoan = \"c\"\nif aoan == \"a\" or boan == \"b\":\n\tprint(\"how to use 'or'\")\n\nif aoan == \"a\" and boan == \"b\":\n\tprint(\"how to use 'and'\")\n\nif not(aoan == \"a\" and boan == \"c\"):\n\tprint(\"how to use 'not'\")\n\nif (aoan == \"a\" or boan == \"b\") and coan == \"c\":\n\tprint(\"logic-bracket\")\n\n\n#python交换两个变量的值\nprint(\"****************交换两个变量的********************\")\nva =3\nvb =4\nva=va+vb\nvb=va-vb\nva=va-vb\nprint(\"va=%d,vb=%d\"%(va,vb))\n\nvc=8\nvd=9\nvc,vd=vd,vc\nprint(\"vc=%d,vd=%d\"%(vc,vd))\n\n\n\n'''\n表达式:\na,b,c = 3 --> 报错,并不是a=3,b=3,c=3,而是3不是可迭代的对象,报错\na,b,c = \"345\" --> a=\"3\",b=\"4\",c=\"5\",\"345\"是可迭代的对象\na,b,c = [3,4,5] --> a=3,b=4,c=5 [3,4,5]是可迭代的对象\n'''\n\n\n\n'''\n可变类型和不可变类型\nimmutable variable:numbers,string,tuple\nvolatile variable:others are all volatile,eg:list,dictionary\ndictionary's key can be any immutalbe variable,but cann't be volatile variable,because volatile have no hashcode,immutable have hashcode.yy\n'''\n\n\n\n\n'''\nstr\neg:name = \"hellokitty\"\norder:0,1,2,3,4,5,6,7,8,9\ninverted order:-10,-9,-8,-7,-6,-5,-4,-3,-2,-1\n\nget one char:\nname[index];\neg:name[0]<-->name[-10], name[9]<-->name[-1]\n\nget substring:\nname[startIndex,endIndexNext,oneStepIndexCount]\nstartIndex:have connection with order.can ignore,if the order is from left to right,then default value is 0.\n\tif the order if from right to left,then default value is -1.\nendIndexNext:have connection with order.can ignore,no matter what order,the default value is until the last one;\n\tif you type it,according to order, the resulted substring not contain name[endIndexNext].\noneStepIndexCount:can ignore,default value is 1,if it's value is positive number then step from left to right;\n\tif it's value is negative number then step from right to left.\n\twhat's mean of it's value:index is 0,1,2,3,4,5,6,7. 
if it's value is 2 then get 0,2,4,6.if it's value is\n\t3 then get 0,3,6,if it's value is 1 then get 0,1,2,3,4,5,6,7.\n'''\ns1451 = \"abcdefgh\"\n#string can not splice with int value.eg:s1451+10 is error\nprint(\"str's length:%d\"%len(s1451))#get str's length:len(strvar)\ns1515 = \"ijk\"\n#string splice string with +\ns1518 = s1451+s1515\nprint(s1518)\nprint(\"str splice:%s\"%(s1451+s1515))\nprint(s1451[3])#get char by index\nprint(s1451[1:])#bcdefgh\nprint(s1451[::-1])\nprint(s1451[-2::-1])\nprint(s1451[:1:-1])\n\n\n\n\n'''\nstring's functions\nstr = \"abc\"\n\nstr.find(\"sub\") --> from left find the first appear child string,if find then return the index of the child string's first char,if not find then retrun -1.\nstr.rfind(sub) --> from right find the first appear child string.\nstr.index(\"sub\") --> reference find(\"sub\"),but if not find will report error,not retrun -1.\nstr.rindex(\"sub\") --> reference rfind(\"sub\"),but if not find will report error,not return -1.\n\nstr.count(\"sub\") --> return the count of \"sub\" appear in str.\n\nstr.replace(\"oldsub\",\"newsub\") --> use \"newsub\" replace all \"oldsub\" in str.attentiion that string is immutable variable,this function will return a new string,old string never change.\nstr.replace(\"oldsub\",\"newsub\",1) --> the third para means that how many \"oldsub\" will be replace if oldsub appear many count.\n\nstr.split(\"anychar\") --> according given char split string into a list.eg:\"aa bb cc\".split(\" \")-->[\"aa\",\"bb\",\"cc\"]\nstr.split() --> without para,according invisible char in str to split.such as /t /n...\n\nstr.capitalize() --> let str's first letter uppercase.attention only str's first letter,not every word's first letter.\nstr.title() --> let every word's first letter uppercase in str.\n\nstr.lower() --> all letters in str will convert into lowercase.\nstr.upper() --> all letters in str will convert into uppercase.\n\nstr.startswith(\"sub\") --> if str start with \"sub\" return True,then return False.\nstr.endswith(\"sub\") --> reference startswith(\"sub\").\n\nstr='hello'\nstr.center(100) --> redefine a string which length is 100,and the str is in center of it.eg:' hello '\nstr.ljust(100) --> redefine a string which length is 100,and the str is in left of it. eg:'hello '\nstr.rjust(100) --> redefine a string which length is 100,and the str is in right of it. eg:' hello'\n\nstr.lstrip() --> strip left blank char of str then return.\nstr.rstrip() --> strip right blank char of str then return.\nstr.strip() --> strip both left and right blank char of str then return. \n\nstr.partition(\"sub\") --> from left find the first \"sub\",then according this \"sub\" split str.\"sub\"'s left is a part,maybe \"\";\"sub\" is a part;\"sub\"'s right is a part,maybe \"\";then composed these three string as a tuple return. eg:\"a b c\".partition(\"a\")-->(\"\",\"a\",\" b c\");\"a b c\".partition(\"b\")-->(\"a \",\"b\",\" c\");\"a b c\".partition(\"c\")-->(\"a b \",\"c\",\"\")\nstr.rpartition(\"sub\") --> reference str.partition(\"sub\"),but attention this is from right find the first \"sub\".\n\nstr=\"a\\nb\\nc\\n\"\nstr.splitlines() --> according lines split str,then composed as a list return. 
eg:[\"a\",\"b\",\"c\"]\n\nstr.isalpha() --> if all chars of str is letter return True,then return False.\nstr.isdigit() --> if all chars of str is digit return True,then return False.\nstr.isalnum() --> if all chars of str is letter or digit or letter and digit return True.then return False.\nstr.isspace() --> if str only contain blanks return True,then return False.\n\nnames=[\"aa\",\"bb\",\"cc\"]\nnames=\"aabbcc\"\nstr=\"-\"\nstr.join(names) --> \"aa-bb-cc\",\"aoaobobococ\"\n\n\n'''\nprint(\"*******************string function*************************\")\nspstr = \"hello world, I'm comeing\"\nprint(spstr.split(\" \"))\n\nprint(spstr.ljust(10))\n\nprint(spstr.partition(\"hello\"))\n\njostr=\"aabbcc\"\nfstr = \"o\"\nprint(fstr.join(jostr))#fouocok\n\n'''\nwhile circulation\nin python have no do-while,switch-case\n'''\ncir = 0\nwhile cir<=10:\n\tprint(cir)\n\tcir+=1\t\n\n\n'''\nfor circulation\nstring can be cycle by for\nbreak;continue;\n\n\nrange(startIndex,endIndexNext,oneStepIndexCount) reference string\npython2:range(1,4),如果你print(range(1,4))会输出[1,2,3],因此range(1,10000000000)会内存溢出\npython3:range(1,4),如果你print(range(1,4))会输出'range(1,4)',因此range(1,10000000000)不会内存溢出\n实际上,python3对此做了优化,当你来取的时候我才给你值,取一个给一个,防止内存溢出:\nrange(1,5)[2]可以取出,list = range(1,5),list[0]也可以取出\n\njudge one obj is can iterable:\nfrom collections import Iterable\nisinstance('abc', Iterable) return Ture or False.\n\ntranverse the index and the element at the same time:\nenumerate(iterableObj)\n'''\nprint(\"**************for circulation**************\")\nstrfor = \"hellokitty\"\nfor temp in strfor:\n\tprint(temp)\nelse:#if you use 'break' in for circulation,'else' will not run.then else will stil run.\n\tprint(\"for's else\")\n\nfor i in range(0,5):\n\tprint(i)\n\nranlist = range(0,10,2)\nprint(ranlist[2]) \nprint(range(1000,1100,2)[2])\nprint(range(800,804))\nprint(range(4))#[0,1,2,3]\n\n#列表生成式:forlist = [expression forcirculation... 
filter]\nprint(\"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\nforlist = [i for i in range(4)]\nprint(forlist)#[0,1,2,3]\nforlist = [i+10 for i in range(10,20,3)]\nprint(forlist)#[20, 23, 26, 29]\nforlist = [\"yes\" for i in range(0,4)]\nprint(forlist)#[\"yes\",\"yes\",\"yes\",\"yes\"]\nforlist = [i for i in range(0,10) if i%2==0]\nprint(forlist)#[0,2,4,6,8]\nforlist = [(i,j) for i in range(0,2) for j in range(0,3)]\nprint(forlist)#[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]\n\n#列表生成器:generator,参考廖雪峰的博客,适合数据量很大的列表,一边取一边生成\n\n#迭代器:Iterator,迭代器是惰性序列,取到时才加载,但是可通过list(迭代器)一次性加载,注意可迭代的对象iterableObj不一定是迭代器,参考廖雪峰的博客\n\n#judge one obj is can iterable\nfrom collections import Iterable\nprint(isinstance('abc', Iterable))#True\nprint(isinstance([1,2,3],Iterable))#True\nprint(isinstance(123,Iterable))#False\n\n#tranverse the index and the element at the same time.\nprint(\"**************enumerate*****************\")\nmindList = [\"a\",\"b\",\"c\",\"d\"]\nfor index,ele in enumerate(mindList):\n\tprint(\"index=%d,ele=%s\"%(index,ele))\n\nmindtuple = (\"aa\",\"bb\",\"cc\")\nfor index,ele in enumerate(mindtuple):\n\tprint(\"tuple:index=%d,ele=%s\"%(index,ele))\n\n\n\n'''\n列表:collection\ncan store multi type value.\n'''\n#list[startIndex,endIndexNext,oneStepIndexCount]:reference string \ninList = [0,1,2,3,4,5,6,7]\nprint(inList[0])#0\nprint(inList[-1])#7\nprint(inList[::-1])#[7,6,5,4,3,2,1,0]\nprint(inList[1:5])#[1,2,3,4]\nprint(inList[-2:1:-1])#[6,5,4,3,2]\n\n#append element at the tip\nmlist = [\"a\",20,3.14,\"123\"]#multi type value\nmlist.append(\"fuck\")\n\n#insert element at specific position,the original element at the positon move to backward\nmlist.insert(2,200)#the first parameter is the index which will be inserted.\nprint(mlist)\n\n#two can collection joint with \"+\"\nslist = [1,2,3]\nalist = mlist+slist\nprint(alist)\n\n#one collection can append at the tip of the other collection\nlist1 = [1,2,3]\nlist2 = [4,5,6]\nlist3 = list1.extend(list2)#extend() have no return value\nprint(list1)#[1,2,3,4,5,6] been append\nprint(list2)#[4,5,6] not change\nprint(list3)#None extend() have no return value\n\n\n#delete and return the last element\ndlist = [1,2,3,4,5,6,7,8]\ndee = dlist.pop()\nprint(dlist)#[1,2,3,4,5,6,7]\nprint(dee)#8\n\n#delete specific element.if the collection have some the same element,then just delete the first appear element\n#remove(para):the parameter is element not index,and have no return value\nrmList = [1,2,2,3]\nrmList.remove(2)\nprint(rmList)#[1,2,3]\n\n#delete element by index\ndelList = [1,2,3,4,5]\ndel delList[1]\nprint(delList)#[1,3,4,5]\n\n#replace element by index\nfixList = [1,2,3,4,5]\nfixList[2] = \"abc\"\nprint(fixList)#[1,2,'abc',4,5]\n\n#search element from a collection\n#judge which element is in a collection\nseaList = [\"abc\",\"d\",\"ef\",\"ghi\"]\nif \"d\" in seaList:\n\tprint(\"'d' is in seaList\")\nif \"fuck\" not in seaList:\n\tprint(\"'fuck' not in seaList\")\n\n#collection's for circulation \nforList = [111,222,333,444,555]\nfor temp in forList:\n\tprint(temp)\n\n#collection's length\nlenList = [1,2,3,4,5]\nprint(\"collection's length:%d\"%len(lenList))\n\nprint(\"************* sort ***************\")\n#number have default sort rule\nsortList=[34,9,123,0,82,44]\nsortList.sort()\nprint(sortList)\n\n#string have default sort rule too.\nstrSortList=[\"cc\",\"tttt\",\"b\",\"23\",\"lll\",\"89\",\"wwww\",\"aaa\",\"1\"]\nstrSortList.sort()\nprint(strSortList)\n\n#custom sort rule:\n#give sort() a anonymous function para,this 
anonymous function have only one para,inner sort() will call this anonymous function and list'seach element will as para transfer into this anonymous function,and this anonymous function's expression will decide the sort rule for list.\nmapList=[{'name':'jace','age':18},{'name':'zs','age':22},{'name':'body','age':90},{'name':'fuck','age':4}]\nmapList.sort(key=lambda kv:kv['name'])\nprint(mapList)\n\n\n\n\n'''\nset:cann't have repeated element.\n'''\nprint(\"(@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\")\naset = {1,2,3,1,2}\nprint(aset)#{1,2,3} auto wipe off repeat element \n\n\n\n\n'''\n元祖:tuple,can not change.\nin add,delete,modify,search\ntuple just can operate 'search'.can not 'add','delete','modify'.\n\nempty tuple:()\none element tuple:(1,).attention there is a comma.\n'''\nmyt = (1,2,3)\n#get tuple's lenth\nprint(\"tuple's lenth:%d\"%len(myt))\n\n#search someone element vlaue\nprint(\"get a element from a tuple:%d\"%myt[1])\n\n#unpack:mya=myt[0],myb=myt[1],myb=myt[2]\n#left variable count must equal tuple's lenth,eg:mya,myb=myt is error; \nmya,myb,myc = myt\nprint(\"tuple's every element:%d,%d,%d\"%(mya,myb,myc))\n\n#for circulation\nfor temp in myt:\n\tprint(temp)\n\n\n\n\n'''\nset,list,tuple convert each other.\n'''\n#set --> list\ns2list = list(aset)\nprint(s2list)#[1,2,3]\n#set --> tuple\nl2tuple = tuple(aset)\nprint(l2tuple)\n#list --> set\naset = set(s2list)\nprint(aset)#{1,2,3}\n#list --> tuple\nl2tuple = tuple(s2list)\nprint(l2tuple)#(1,2,3)\n#tuple --> list\ns2list = list(l2tuple)\nprint(s2list)#[1,2,3]\n#tuple --> set\naset = set(l2tuple)\nprint(aset)#{1,2,3}\n\n\n\n\n\n\n'''\n字典dictionary:just like java's map.nothing on order,this is deference of collection\nkey-value\ndictionary's key can be any immutalbe variable,but cann't be volatile variable,because volatile have no hashcode,immutable have hashcode.yy\n'''\ninfos = {\"name\":\"jack\",\"addr\":\"chengdu\",\"age\":18}\nprint(infos)#output all\nprint(\"my name is %s,my address is %s,I'm %d.\"%(infos[\"name\"],infos[\"addr\"],infos[\"age\"]))#out every key\nprint(\"dictionary's length:%d\"%len(infos))\n\n#add\ninfos[\"tel\"] = \"10086\"\nprint(infos)\n\n#delete,if have no this key,then report an error\ndel infos[\"addr\"]\nprint(infos)\n\n#modify\ninfos[\"tel\"] = \"10010\"\nprint(infos)\n\n#search\n#if have no this key,then report an error\noInfo = infos[\"name\"]\nprint(oInfo)\n#if have no this key ,then will not report error\ngInfo1 = infos.get(\"tel\")\ngInfo2 = infos.get(\"fuck\")\nprint(gInfo1)#10010\nprint(gInfo2)#None,not error\n\nmaps = {\"name\":\"jack\",\"age\":18,\"addr\":\"ah\"}\n#traverse the dict directly,in face is traverse all keys.\nfor key in maps:\n\tprint(key)#age,name,addr\n\n#get all keys composed collection\n#python2 return:['name','age','addr']\n#python3 return:dict_keys(['name','age','addr']),infact is object\nkeyList = maps.keys()\nprint(keyList)\n#no matter python2 or python3, the result can for circulation certainly.\nfor temp in keyList:\n\tprint(temp)\n\n#get all values composed collection\n#python2 return:['18','jack','ah']\n#python3 retrun:dict_values(['18','jack','ah']),infact is object\nvalList = maps.values()\nprint(valList)\n#no matter python2 or python3,the result can for circulation certainly.\nfor temp in valList:\n\tprint(temp)\n\n#get k-v composed collection,each k-v composed as a tuple,all tuple composed as a collection.\n#python2 retrun:[('age', 18), ('name', 'jack'), ('addr', 'ah')]\n#python3 return:dict_items([('age', 18), ('name', 'jack'), ('addr', 'ah')]),infact is 
object\nkvList = maps.items()\nprint(kvList)\n#no matter python2 or python3,the result can for circulation certainly.\nfor tupleTemp in kvList:\n\tprint(tupleTemp)#tupleTemp is a tuple.\nfor ta,tb in kvList:#tuple auto unpack into ta and tb.\n\tprint(ta)\n\tprint(tb)\n\nprint(\"&&&&&&&&&&&&&&&&&&&&&&liao xuefeng&&&&&&&&&&&&&&&&&&&&&&&&&&&&\")\ndl = {'a': 1, 'b': 2, 'c': 3}\nfor key in dl:\n\tprint(key)\n\nfor myk in dl.keys():\n\tprint(myk)\n\n\"\"\"\n函数:function;golbal variable;local variable\nbut attention tab indent,just like if-else.\ndon't need define return type,because you can return any type.\ndef funName(necessaryParas,defaultParas,*tuple,**dictionary),attention these paras's order.the only difference between defaultPara and necessaryPara is defaultPara has a default value,you can omit it.others are the same.\neg:\ndef funName(a,b,c=1,d=2,*args,**kwargs):\n\tpass\nfunName(1,2,3,4,,5,6,name=\"jack\",age=18)\na=1,b=2,c=3,d=4,args=(5,6),kwargs={\"name\":\"jack\",\"age\":18}\n\nfunction paras's transfer:formal para point to actual para's memory block.\n\npython code execute order just like html run from top to bottom:\n1:define global variable\n2:define function and reference global variable\n3:call the function \n\nprint function's manual:\nhelp(funName)\n\n高阶函数:参考廖雪峰的博客\nmap()函数接收两个参数,一个是函数,一个是Iterable,map将传入的函数依次作用到序列的每个元素,并把结果作为新的Iterator返回。\nreduceI()把一个函数作用在一个序列[x1, x2, x3, ...]上,这个函数必须接收两个参数(x1,x2),reduce把结果继续和序列的下一个元素(x3)做累积计算\nfilter():和map()类似,filter()也接收一个函数和一个序列。和map()不同的是,filter()把传入的函数依次作用于每个元素,然后根据返回值是True还是False决定保留还是丢弃该元素。\nsorted():自定义排序规则\n\n函数对象类型的变量,下面有例子:\ndef fun():\n\tpass\na = fun\na() --> 可执行\n\n函数作为返回值,下面有例子,参考廖雪峰的博客\n\n装饰器decorator,下面有例子,参考廖雪峰的博客,参考图片decorator1.png\n\n偏函数,参考廖雪峰的博客,当函数的参数个数太多,需要简化时,使用functools.partial可以创建一个新的函数,这个新函数可以固定住原函数的部分参数,从而在调用时更简单。\n\n\"\"\"\n\ndef funName():\n\tprint(\"function body\")\nfunName()\n\ndef funName2():\n\tprint(\"f2\")\n\treturn \"f2\"\nfunResult2 = funName2()\nprint(funResult2)\n\ndef funName3(a,b):\n\tprint(a+b)\n\treturn a+b\nprint(funName3(1,2))\n\n#retrun tuple;'return a,b,c' is equivalent to 'return (a,b,c)'. 
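\n#(editor's illustration, not part of the original file; _pair_demo is a hypothetical name) the returned tuple can also be unpacked straight into separate variables:\ndef _pair_demo():\n\treturn 1, 2#identical to: return (1, 2)\n_px, _py = _pair_demo()\nprint(\"unpacked:%d,%d\"%(_px, _py))#unpacked:1,2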
\ndef funName4(a,b,c):\n\treturn a,b,c #return (a,b,c)\nfun4R = funName4(1,2,3)\nprint(fun4R)\n\nvarg = 10#global variable\ndef fun5():\n\tvarl = 9#local variable\n\tprint(varg)#reference global variable directly\nfun5()\n\nvarg2=11\ndef fun6():\n\tglobal varg2#tell anyone this is a global variable,but you can omit this line.\n\tprint(varg2)\nfun6()\n\n\nvarg3=12\ndef fun7():\n\t#global varg3 can omit this line\n\tvarg3=13#reference global varg3\n\tprint(varg3)\nfun7()\n\n#namedPara\ndef fun8(a,b):\n\tprint(a+b)\nfun8(b=1,a=2)\nfun8(a=14,b=15)\nfun8(16,b=17)\n#fun8(a=18,19) error:if have anonymousPara, namedPara must after anonymousPara.\n\n#defaultPara must after necessaryPara.\ndef fun9(a,b,c=3):\n\tprint(a+b+c)\nfun9(1,2)#1+2+3=6\nfun9(1,2,4)#1+2+4=7\nfun9(1,2,c=4)#1+2+4=7\nfun9(a=1,b=2,c=4)#1+2+4=7\n\ndef fun10(a,b,c=3,d=4):\n\tprint(a+b+c+d)\nfun10(1,2)#1+2+3+4=10\nfun10(1,2,d=5)#1+2+3+5=11\nfun10(1,2,c=5)#1+2+5+4=12\nfun10(1,2,5,6)#1+2+5+6=14\nfun10(1,2,c=5,d=6)#1+2+5+6=14\n\n#args-->tuple\n#tuple used to complement the front of\ndef fun11(a,b,c=3,d=4,*args):\n\tprint(\"a=%d,b=%d,c=%d,d=%d\"%(a,b,c,d))\n\tprint(args)\nfun11(1,2)#a=1,b=2,c=3,d=4,args=()\nfun11(1,2,d=5)#a=1,b=2,c=3,d=5,args=()\nfun11(1,2,c=5)#a=1,b=2,c=5,d=4,args=()\n#fun11(1,2,c=5,d=6,7,8) error:namedPara must after anonymousPara\nfun11(1,2,5,6,7,8)#a=1,b=2,c=5,d=6,args=(7,8)\nfun11(1,2,5,6,7)#a=1,b=2,c=5,d=6,args=(7,)\ntargs=(5,6,7)\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~\")\nfun11(1,2,*targs)#a=1,b=2,c=5,d=6,args=(7,) targs used to complement previous all necessaryParas and defaultParas,if which necessaryPara not given,then fetch from targs according order;if which defaultPara not given,don't use default value,but fetch from trags according to order;the rest of element in targs composed to args as a final tuple. 
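\n#(editor's check, not part of the original file; _unpack_rule is a hypothetical name) a runnable confirmation of the complement rule described above: positional values fill the named paras first, overriding defaults, and only the leftovers land in *args\ndef _unpack_rule(a, b, c=3, d=4, *args):\n\treturn (a, b, c, d, args)\nassert _unpack_rule(1, 2, *(5, 6, 7)) == (1, 2, 5, 6, (7,))#targs fills c and d, the leftover 7 goes to args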
\nprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\nfun11(*targs)#a=5,b=6,c=7,d=4,args=()\n\n#kwargs-->dictionary\ndef fun12(a,b,c=3,d=4,*args,**kwargs):\n\tprint(\"a=%d,b=%d,c=%d,d=%d\"%(a,b,c,d))\n\tprint(args)\n\tprint(kwargs)\nfun12(1,2,3,4,5,6,name=7,age=8)#a=1,b=2,c=3,d=4,args=(5,6),kwargs={'name':7,'age':8}\nfun12(1,2,3,4,5,6)#a=1,b=2,c=3,d=4,args=(5,6),kwargs={}\nfun12(1,2,3,4,name=5,age=6)#a=1,b=2,c=3,d=4,args=(),kwargs={'name':5,'age':6}\nfun12(1,2,name=5,age=6)#a=1,b=2,c=3,d=4,args=(),kwargs={'name':5,'age':6}\nfun12(1,2)#a=1,b=2,c=3,d=4,args=(),kwargs={}\ntupleargs = (5,6)\ndictargs={\"add\":7,\"tel\":8}\nfun12(1,2,3,4,*tupleargs,**dictargs)#a=1,b=2,c=3,d=4,args=(5,6),kwargs={'add':7,'tel':8}\nprint(\"**************************\")\nfun12(1,2,*tupleargs,**dictargs)#a=1,b=2,c=5,d=6,args=(),kwargs={'add':7,'tel':8}\nfun12(*tupleargs,**dictargs)#a=5,b=6,c=3,d=4,args=(),kwargs={'add':7,'tel':8}\n\n#函数变量\ndef myvarfun():\n\tprint(\"函数变量\")\nmvr = myvarfun#变量复制\nmvr()#可指向,mvr和myvarfun指向同一个函数对象\n\n#函数作为返回值\ndef myrefun():\n\tdef myinfun():\n\t\tprint(\"我是函数,我被返回\")\n\treturn myinfun\nmyin = myrefun()\nprint(myin)#myin指向对应函数对象的内存地址,.myinfun at 0x7f6af4351840>\nmyin()\n\nprint(\"*****************************装饰器start***************************\")\nimport functools\n#装饰器decorator,无参数,now不再指向原来的now函数,而是指向wrapper函数\n\ndef log(func):\n#\t@functools.wraps(func)\n\tdef wrapper(*args,**kwargs):\n\t\tprint(\"now函数之前:%s\"%func.__name__)#now\n\t\treturn func(*args,**kwargs)\n\treturn wrapper\n\n@log\ndef now(a):\n\tprint(\"now函数,%d\"%a)\n\nprint(\"1函数名%s\"%now.__name__)#wrapper,如果写@functools.wraps(func)则仍是now\nnow(110)#now(a)-->wrapper(a)-->log(now)(a)-->now=log(now)-->now==wrapper\nprint(\"2函数名%s\"%now.__name__)#wrapper,如果写@functools.wraps(func)则仍是now\n\n#装饰器decorator,有参数,cut不再指向原来的cut函数,而是指向wrapper函数\ndef log(text):\n\tdef decorator(func):\n#\t\t@functools.wraps(func)\n\t\tdef wrapper(*args,**kwargs):\n\t\t\tprint(\"有参数%d,cut函数之前:%s\"%(text,func.__name__))\n\t\t\treturn func(*args,**kwargs)\n\t\treturn wrapper\n\treturn decorator\n\n@log(10086)\ndef cut(b):\n\tprint(\"cut函数体:%d\"%b)\n\nprint(\"3函数名:%s\"%cut.__name__)#wrapper,如果写@functools.wraps(func)则仍是cut\ncut(120)#cut(120)<-->log(10086)(cut)(120)\nprint(\"4函数名:%s\"%cut.__name__)#wrapper,如果写@functools.wraps(func)则仍是cut\n\nprint(\"****************************装饰器end*******************************\")\n\n\n\n'''\n引用:just like java\nimmutable variable:numbers,string,tuple\nvolatile variable:others are all volatile,eg:list,dictionary\ndictionary's key can be any immutalbe variable,but cann't be volatile variable,because volatile have no hashcode,immutable have hashcode.yy\n'''\nvaradd = 10\nprint(id(varadd))#output variable's memory address\n\n'''\n匿名函数:anonymous function\nfunc = lambda paras..:expression\nfunc point to the anonymous function's body.\nafter ':' must be expression,because anonymous function default have retrun value,the return value is the expression's result.\nfunc = lambda x,y:x+y\nfunc(1,2),the retrun value is x+y.\n'''\nanonyfunc = lambda x,y:x+y\nanonyresult = anonyfunc(1,2)\nprint(anonyresult)\n\n\n#custom sort rule:\n#give sort() a anonymous function para,this anonymous function have only one para,inner sort() will call this anonymous function and list'seach element will as para transfer into this anonymous function,and this anonymous function's expression will decide the sort rule for list.\nmapList1=[{'name':'jace','age':18},{'name':'zs','age':22},{'name':'body','age':90},{'name':'fuck','age':4}]\nmapList1.sort(key=lambda 
kv:kv['name'])\nprint(mapList1)\n\n#getting an anonymous function from input()\n#this shows that python is a dynamic language: you never know what code will run until you run it.\n#you can input: lambda x,y:x+y\n'''\nanonf = input(\"input a anonymous function:\")\ndef testAnonf(a,b,func):\n\tresult = func(a,b)\n\tprint(result)\nfrom sys import version_info\nif version_info.major == 2:#python2\n\ttestAnonf(21,22,anonf)\nif version_info.major == 3:#python3\n\tanonf = eval(anonf)#eval():turns a plain string into python code,eg:makes '[1,2,3]' into [1,2,3] and so on.\n\ttestAnonf(31,32,anonf)\n'''\n\n\n\n\n'''\nsteps to operate on a file:\n1:open the file.\nf=open(\"filePath\",\"fileVisitMode\") reference file_visit_mode.png,f points to the opened file.if fileVisitMode is 'r' you can omit it,and if the filePath does not exist while fileVisitMode is 'r' an error is reported.\neg:f=open(\"1.txt\",\"r\");f=open(\"/yzjgogo/home/1.txt\",\"w\")\n\n2:operate on the file.\nsuch as reading it or writing it.\n\n3:close the file.\nattention:after you finish operating on the file you must close it.don't forget it.\nf.close()\n'''\nprint(\"***************file about*****************\")\n#write content into a file.\nmyfile = open(\"f.txt\",\"w+\")\nwcon = myfile.write(\"abcdefghigklmnopqrst\")#write(\"content\") returns the count of chars written into the file.after writing, the pointer moves past the last char,so it points at \"\",the empty char.\nprint(wcon)\nmyfile.close()\n\n#read(count) read() :read content from the file.\n#open the file with \"r\" mode;the file pointer starts at the first char.\nmyfile = open(\"f.txt\",\"r\")\nprint(myfile.read(1))#'a',the pointer moves to the next char.\nprint(myfile.read(1))#'b',the pointer moves to the next char.\nprint(myfile.read(1))#'c',the pointer moves to the next char.\nprint(myfile.read(1))#'d',the pointer moves to the next char.\nprint(myfile.read(1))#'e',the pointer moves to the next char.\nprint(myfile.read())#'fghigklmnopqrst',after reading, the pointer moves past the last char,so it points at \"\",the empty char.\nprint(myfile.read(1))#''\nmyfile.close()\n\n#f.readline()-->from the pointer,read one line from f and return it as a string.\n#f.readline(count)-->with a para it means how many chars to read from the current line;note you can at most get all the chars of the current line,you can't get chars from another line.\nmyfile = open(\"./f.txt\",\"r\")\nprint(myfile.readline(13))\nprint(myfile.read(1))\nmyfile.close()\n\n#f.readlines()-->all the lines of f composed as a list and returned.\nmyfile = open(\"./f.txt\",\"r\")\nprint(myfile.readlines())#the pointer moves past the last char,meaning it points at \"\",the empty char.\nmyfile.close()\n\n#f.tell()-->tells you where the pointer is now,returning the index of the char in the file's content.attention:the pointer's position starts from 0 not 1\nmyfile = open(\"f.txt\",\"r\")\nprint(myfile.tell())\nmyfile.close()\n\n#set the pointer's position;with this you can move the pointer anywhere.\n#f.seek(offset,from),offset is a byte count,so be careful with chinese chars;in python2 it can be a negative number but in python3 it can not.from==0-->from the first char's index;from==1-->from the current position of the file;from==2-->from the position past the last char \"\",the empty char\n#eg:seek(2,0)-->move two bytes from the first char.\nprint(\"&&&&&&&&&&&&&&&&&\")\nmyfile = open(\"f.txt\",\"r\")\nmyfile.seek(0,0)\nprint(myfile.tell())\nprint(myfile.read(1))\nmyfile.close()\n\n#file and folder\nimport os\n#os.rename(\"./f.txt\",\"./ff.txt\")#change a file's name\n#os.rename(\"myfolder\",\"mfolder\")#change a folder's name\n#os.remove(\"a.txt\")\n#os.remove(\"af\")#can 
not delete folder\n#os.rmdir(\"af\")#delete folder\n#os.mkdir(\"dir\")\n\n#os.chdir(\"/home/yzjgogo\")#change current path\n#print(os.getcwd())#get current path\n#print(os.listdir(\"/home/yzjgogo/python\"))#get which path's all file name and folder name then composed as a list return.\n\n\n'''\n面向对象OOP\n\nself --> just like java's 'this' keyword,means current object.but maybe it can't ignore?\ncls --> class's obj,everyting is object.\nsuper:python2-->super(SonClassName,self);python3-->super() no para\n__dict__ --> is an obj's attribute,not a function;it's a dict,used to store self.attr.eg:{'name':'HshiQi','age':8}\nclass Dog(objec):\n\tdef __init__(self,name,age):\n\t\tself.name = name\n\t\tself.age = age\n\n\njust like java's constructor:__new__(cls)(create a obj) and __init__(self)(init created obj)\n__new__(cls,...) --> top father object's function.used to create a obj.the para is 'class obj' point to the class.you also can define more para used to give __init__(sefl,...),if you rewrite this function you must return the object which is created by top father's __new__(cls,...)\n__init__(self,...) --> top father object's function.call inner __new__(cls),used to init the obj which created through __new__(cls),such as define object's attribute in there.\n\n__str__(self) --> just like java's toString().you must return String.\n\n__repr__(self) --> 两者的区别���__str__()返回用户看到的字符串,而__repr__()返回程序开发者看到的字符串,也就是说,__repr__()是为调试服务的。但是通常__str__()和__repr__()代码都是一样的,所以,有个偷懒的写法:\nclass Student(object):\n def __init__(self, name):\n self.name = name\n def __str__(self):\n return 'Student object (name=%s)' % self.name\n __repr__ = __str__\n\n__iter__(self) --> 参考廖雪峰的博客,配合__next__(self),让对象可以for循环\n\n__getitem__() --> 参考廖雪峰的博客,可以for循环的对象,实现像list那样按下表取元素,参考__iter__(self)\n\n__getattr__() --> 参考廖雪峰的博爱,正常情况下,当我们调用类的方法或属性时,如果不存在,就会报错,要避免这个错误,除了可以加上一个score属性外,Python还有另一个机制,那就是写一个__getattr__()方法,动态返回一个属性。 \n\n__call__(self) --> 参考廖雪峰的博客,把实例本身当方法调用。通过callable()函数,我们就可以判断一个对象是否是“可调用”对象.\nclass Student(object):\n def __init__(self, name):\n self.name = name\n\n def __call__(self):\n print('My name is %s.' % self.name)\n调用方式如下:\n>>> s = Student('Michael')\n>>> s() # self参数不要传入\nMy name is Michael.\n\n\n\n__del__(self) --> when the object have no any variable point at it.this function will be call,and delete the object free the memory.if untill execute all the porgram finish,the object still not delete,this function will be call too.because the porgram finish will free memory apce,so the object will be delete too.\n\ndel obj_variable --> make the obj_varibale become undefined,not point at the obj,but the obj still exist. until have no any varibale point at the obj,the obj's __del__() function will be call.\n\nsys.getrefcount(obj_variable) --> return the count of how many varibale point at the obj,the para is the variable of anyone point at the obj.the attention is function's para is a reference of the obj too.so general you should mimus 1.eg:count = sys.getrefcount(obj_variable)-1.\n\ndefine yourself's function,you must at least transfer into 'self' para.\n\nadd instance attribute:\nyou can add attribute through obj instance,eg:obj.name = \"zs\",define name attribute and give value.but this just effect with current obj,not effect with other instance.\n\nadd instance function:\nyou can give current instance add a new function,but just effect with this instance,not effect with other instance.\n>>> def set_age(self, age): # 定义一个函数作为实例方法\n... 
self.age = age\n...\n>>> from types import MethodType\n>>> s.set_age = MethodType(set_age, s) # 给实例绑定一个方法\n>>> s.set_age(25) # 调用实例方法\n>>> s.age # 测试结果\n25\n\nadd class attribute and function,this will effect with all the instance of this class:\n>>> def set_score(self, score):\n... self.score = score\n...\n>>> Student.set_score = set_score\n\nlimit all the instances's attributes of the class,but not effect the sonclass's instance:\neg:limit all the instances just can have 'name' and 'age' attribute:\nclass Student(object):\n __slots__ = ('name', 'age') #use tuple define it.\n>>> s = Student() # 创建新的实例\n>>> s.name = 'Michael' # 绑定属性'name'\n>>> s.age = 25 # 绑定属性'age'\n>>> s.score = 99 # 绑定属性'score'\nTraceback (most recent call last):\n File \"\", line 1, in \nAttributeError: 'Student' object has no attribute 'score'\n\n@property:参考廖雪峰的博客,Python内置的@property装饰器就是负责把一个方法变成属性调用的,广泛应用在类的定义中,可以让调用者写出简短的代码,同时保证对参数进行必要的检查,这样,程序运行时就减少了出错的可能性。\n\n\nprivate attribute:startwith '__';eg:__name,__secret,you should define setXXX() and getXXX().as a private attribute you just can call it inner an object and through self.xxx call it.\n\nprivate function:startwith '__';eg:__wank();as a private function you just can call it inner an object and through sefl.__XXX call it.\n\nclass attribute:just like java's static attribute,this attribute belong to class,every instance enjoy.you can visit it such as:className.attribute or obj.attribute\n\n\nclass method:just like java's static method,decorate with @classmethod, you must transfer a 'cls' para,means current class's obj,everything is object.you can call it such as:calssName.xxx() or obj.xxx()\n\n\nstatic method:just like java's static method,decorate with @staticmethod,no need transfer any para.this also is difference of calssmethod.you can call it such as:calssName.xxx() or obj.xxx()\n\nisinstance(instance, class)判断某个对象实例是不是某个类的类型\neg:dog = Dog()\nisinstance(dog,Dog) --> True\n也可以判断某个对象是不是多个类型中的一个:\nisinstance([], (list, tuple)),判断[]是不是list或tuple中的任意一个\n\ndir(instance):获取对象的所有属性��方法,以list的方式返回,切list的元素都是字符串\n\n定义对象的len(obj)方法:需要在类中定义__len__(self)函数即可。\nlen(obj) == obj.__len__()\n\n判断对象有没有某个属性或方法以及设置属性\nhasattr(obj, '属性名或方法名') 判断有没有该属性或方法,返回true或false\nsetattr(obj, '属性名', \"jack\") 添加属性或给属性重新赋值,只给某个对象添加属性,不给类添加\ngetattr(obj, '属性名或方法名') 获取属性值或方法名,如果不存在则报错\ngetattr(obj, '属性名或方法名',defaultvalue) 获取属性值或方法名,如果不存在则返回默认值\n\n枚举类:参考廖雪峰的博客。\n\n动态创建类:参考廖雪峰的博客,type()和metaclass元类\n\n'''\nclass Student(object):\n\n\tnum = 0 #class attribute:just like java's static attribute,this attribute belong to class,every instance enjoy.\n\n\tdef __new__(cls,name,age,secret):\n\t\tprint(\"execute__new__\")\n\t\t#return super().__new__(cls)\n\t\treturn object.__new__(cls)\n\n\tdef __init__(self,name,age,secret):\n\t\tprint(\"call init,and define 'name','age','tools' three attribute\")\n\t\tself.name = name\n\t\tself.age = age\n\t\tself.tools = []\n\t\tself.__secret = secret#private attribute\n\t\tStudent.num+=1\n\n\tdef __str__(self):\n\t\tprint(\"call private function:%s\"%self.__wank())#through sefl.__XXX call it.\n\t\treturn \"this student's name is %s,age is %d,he have many tools,such as %s\"%(self.name,self.age,str(self.tools))\n\n\tdef __del__(self):\n\t\tprint(\"I'm __del__ function\")\n\n\tdef __len__(self):#通过len(obj)调用即可\n\t\treturn 10010\n\n\tdef setSecret(self,secret):\n\t\tself.__secret = secret#call private attribute through self,and give it value\n\n\tdef getSecret(self):\n\t\treturn self.__secret\n\n\tdef __wank(self):\n\t\treturn \"like wank\"\n\n\t@classmethod\n\tdef 
addNum(cls):#class method,you must transfer a cls para,means current class's obj,everything is object.\n\t\tcls.num+=10\n \n\t@staticmethod\n\tdef show():#static method:decorate with @staticmethod,no need transfer any para.this also is difference of calssmethod.\n\t\tprint(\"static method\")\n\n\t#yourself's function,you must at least transfer into 'self' para\n\tdef addTool(self,tool):\n\t\tself.tools.append(tool)\n\t\n\nclass Book:\n\tdef __str__(self):\n\t\treturn \"book\"\nclass Pen:\n\tdef __str__(self):\n\t\treturn \"pen\"\n\n#create a object and output it.\nstu = Student(\"jack\",18,\"a secret\")#create a stu object\n#stu.sex=1 you can add attribute through obj instance\nbook = Book()#create a book object\npen = Pen()#create a pen object\n\nstu.addTool(book)\nstu.addTool(pen)\nprint(stu)\n\nprint(\"#############dir############\")\n#dir(obj)\nprint(dir(\"ABC\"))\nprint(dir(stu))\nprint(dir(object))\nprint(len(stu))\nprint(stu.__len__())\n\n#属性\nprint(hasattr(stu,\"fuck\"))\nsetattr(stu,\"fuck\",\"ok\")\nprint(getattr(stu,\"fuck\"))\nsetattr(stu,\"fuck\",\"ookk\")\nprint(getattr(stu,\"fuck\"))\ngstu = Student(\"nikoo\",88,\"nikoo's secret\")\nprint(getattr(gstu,\"fuck\",\"没有fuck属\"))#说明setattr()只给某个对象添加属性,不给类添加\n\n\n#private attribute and private function.\n#print(stu.__secret) you cann't call private attribute out object\n#print(stu.__wank()) you cann't call private function out object\nprint(\"original secret:%s\"%stu.getSecret())\nstu.setSecret(\"new secret\")\nprint(\"new secret:%s\"%stu.getSecret())\n\n#del reference\n#stu,stua,stub,stuc all point at the same obj.\nimport sys\nstua = stu\nstub = stua\nstuc = stub\nprint(\"current obj's reference count:%d\"%(sys.getrefcount(stu)-1))#4,stu stua stub stuc\ndel stuc#after del the variable will be undefined\nprint(\"current obj's reference count:%d\"%(sys.getrefcount(stu)-1))#4,stu stua stub \ndel stub\nprint(\"current obj's reference count:%d\"%(sys.getrefcount(stu)-1))#4,stu stua \ndel stua\nprint(\"current obj's reference count:%d\"%(sys.getrefcount(stu)-1))#4,stu \ndel stu#after del stu,no variable point the obj,so will call __del__() function delete the obj.\nprint(\"*******invider********\")\n#if until execute all code the obj still not be del,the __del() will be call too,because the program release the memory space the obj del too.\n\n#class attribute\nprint(\"*********calss attribute********\")\nstud = Student(\"jack\",18,\"a secret\")\nprint(stud.num)\nprint(Student.num)\n\n#class method\nStudent.addNum()\nprint(Student.num)\nprint(stud.num)\n\n#static method\nStudent.show()\nstud.show()\n\nprint(isinstance(stud,Student))#True\n\n'''\n继承\n\nsuper:python2-->super(SonClassName,self);python3-->super() no para\n\nthe 'object' is all obj's top father class.\n\nson can rewrite father's function,just need hava the same name,no need have the same paras.\n\ncall father's funciton have two ways:\nFather.XXX(self)\nsuper().XXX() you don't need transfer self para.if you transfer will report error.\n\nprivate attribute can't be extend,but you can through public function indirect call it.\nprivate function can't be extend,but you can through public function indirect call it.\n\n\n'''\nclass Animal(object):#(object) can ignore.\n\tdef __init__(self):\n\t\tself.__name = \"jack\"#private attribute\n\n\tdef __wank(self):#private function\n\t\tprint(\"can wank\")\n\n\tdef getName(self):\n\t\treturn self.__name\n\n\tdef callWank(self):\n\t\tself.__wank()\n\n\tdef eat(self):\n\t\tprint(\"Animal can eat food\")\n\n\tdef 
run(self,a,b,c):\n\t\tprint(\"Animal can run\")\n\nclass Dog(Animal):\n\tdef bark(self):\n\t\tprint(\"Dog can bark\")\n\n\tdef eat(self):#rewrite father's function\n\t\tAnimal.eat(self)#call father's function\n\t\tprint(\"Dog can eat food too\")\n\n\tdef run(self,d):#rewrite father's function,just need have the same function name,no need have the same paras.\n\t\tif version_info.major == 2:#python2\n\t\t\tsuper(Dog,self).run(1,2,3)\n\t\tif version_info.major == 3:#python3\n\t\t\tsuper().run(1,2,3)#call father's function,you don't need transfer self para,if you transfer will report error.\n\t\tprint(\"Dog can run too %s\"%d)\n\nmydog = Dog()\nmydog.eat()\nmydog.run(\"hahaha\")\n#print(mydog.__name) private attribute can't be extend,but you can through public function indirect call it.\nprint(mydog.getName())\n#mydog.__wank() private function can't be extend,but you can through public function indirect call it.\nmydog.callWank()\n\n'''\n多继承:most of the feature are the same of single extend,multi parent not affect each other.\nmulti parent should best have no the same function.\n\nclass.__mro__ : return the call order,just like the extend order,if multi parent have the same function,then call according to the call order.eg:C-->A-->B-->object,if classA and classB have the same function 'test()',then if you execute 'c.test()',in fact execute A's test() not B's test().\n'''\nclass A(object):\n def testA(self):\n print(\"testA\")\n\nclass B(object):\n def testB(self):\n print(\"testB\")\n\nclass C(A,B):\n pass\n\nobjc = C()\nobjc.testA()\nobjc.testB()\nprint(C.__mro__)#the call order is:C--A--B--object\n\n\n'''\n所谓多态就是指程序中定义的引用变量所指向的具体类型和通过该引用变量发出的方法调用在编程时并不确定,而是在程序运行期间才确定,即一个引用变量倒底会指向哪个类的实例对象,该引用变量发出的方法调用到底是哪个类中实现的方法,必须在由程序运行期间才能决定。因为在程序运行时才确定具体的类,这样,不用修改源程序代码,就可以让引用变量绑定到各种不同的类实现上,从而导致该引用调用的具体方法随之改变,即不修改程序代码就可以改变程序运行时所绑定的具体代码,让程序可以选择多个运行状态,这就是多态性。\n\n 比如你是一个酒神,对酒情有独钟。某日回家发现桌上有几个杯子里面都装了白酒,从外面看我们是不可能知道这是些什么酒,只有喝了之后才能够猜出来是何种酒。你一喝,这是剑南春、再喝这是五粮液、再喝这是酒鬼酒….在这里我们可以描述成如下:\n\n 酒 a = 剑南春\n\n 酒 b = 五粮液\n\n 酒 c = 酒鬼酒\n\n …\n\n 这里所表现的的就是多态。剑南春、五粮液、酒鬼酒都是酒的子类,我们只是通过酒这一个父类就能够引用不同的子类,这就是多态——我们只有在运行的时候才会知道引用变量所指向的具体实例对象。\n'''\n\n\n\n'''\n单例模式:single instance\nall objs created by this class is point at the same memory space.\n'''\nclass Bear(object):\n\t__instance = None\n\t__init_flag = False\n\tdef __new__(cls,name):\n\t\tif cls.__instance == None:\n\t\t\tcls.__instance = object.__new__(cls)\n\t\t\treturn cls.__instance\n\t\telse:\n\t\t\treturn cls.__instance\n\t\n\tdef __init__(self,name):#just init once\n\t\tif Bear.__init_flag == False:\n\t\t\tself.name = name\n\t\t\tBear.__init_flag = True\n\t\t\t\n\tdef show(self):\n\t\tprint(\"sefl.name=%s\"%self.name)\n\nbear = Bear(\"dog bear\")\nprint(id(bear))\nbear2 = Bear(\"beiji bear\")\nprint(id(bear2))\nbear.show()\nbear2.show()\n\n\n'''\n异常\nException is all errors's top father class.\n\nat the same time catch multi error:\npython2:Error1,Error2;\npython3:(Error1,Error2),in fact is a tuple.\n\nno mater what error just catch it:\npython2:except: that's all. but except Exception is OK? 
take care about it.\npython3:except Exception.\n\n\nexcept Error as result:\ncatch an error and get it's obj,you can output it.\n\nraise xxxError:used to throw a error.\nraise: after catch the error throw it again\n\nelse:\nif have no any error will execute it.\n\nfinally:\nno mater what,'finally' will run always.\n'''\nif version_info.major == 2:#python2\n\tprint(\"this is python2\")\n'''\n\ttry:\n\t\t#mynum+=1\n\t\t#open(\"fuck.txt\")\n\t\t#11/0\n\t\tprint(\"code\")\n\texcept NameError:\n\t\tprint(\"******NameError******\")\n\texcept ZeroDivisionError:\n\t\tprint(\"********ZeroDivisionError************\")\n\texcept IOError as result:#result point at the error obj.\n\t\tprint(\"**********IOError**************\")\n\t\tprint(result)\n\texcept NameError,IOError:#python2,split with , not a tuple\n\t\tprint(\"**********multi error************\")\n\texcept:#python2 catch all errors,just :\n\t\tprint(\"catch all the errors\")\n\telse:# if no any error,'else' will run.\n\t\tprint(\"no any error\")\n\tfinally:#no mater what,'finally' will run always.\n\t\tprint(\"finally execute aways\")\n'''\nif version_info.major == 3:#python3\n\tprint(\"this is python3\")\n\ttry:\n\t\t#mynum+=1\n\t\t#open(\"fuck.txt\")\n\t\t#11/0\n\t\tprint(\"code\")\n\texcept NameError:\n\t\tprint(\"******NameError******\")\n\t\t#raise after catch the error throw it again\n\texcept ZeroDivisionError:\n\t\tprint(\"********ZeroDivisionError************\")\n\texcept IOError as result:#result point at the error obj.\n\t\tprint(\"**********IOError**************\")\n\t\tprint(result)\n\texcept (NameError,IOError):#python3,catch multi a tuple\n\t\tprint(\"**********multi error************\")\n\texcept Exception:#python3 catch all errors.\n\t\tprint(\"catch all the errors\")\n\telse:# if no any error,'else' will run.\n\t\tprint(\"no any error\")\n\tfinally:#no mater what,'finally' will run always.\n\t\tprint(\"finally execute aways\")\n\n\n\n'''\n自定义异常\nException is all error's top father class.\nraise xxxError:used to throw a error.\nraise: after catch the error throw it again\n'''\n\nclass MyError(Exception):\n\tdef __init__(self,errorContent):\n\t\tself.error = errorContent\n\n\tdef __str__(self):\n\t\treturn self.error\n\n#use you diy erro\ntry:\n\tmyerror = MyError(\"you are error\") \n\traise myerror\nexcept Exception as res:\n\tprint(res)\n\t#raise after catch the error throw it again\n\n\n'''\n模块:module\nimport random --> random is a module,when you import the module will execute all it's code.\n\ninstall a module: eg,'pygame' is a popular module \npython2:sudo pip install moduleName\npython3:sudo pip3 install moduleName\nafter install finished,import moduleName and use it.\n\nif you import custom module,current path will generate a '__pycache__' folder,inner this folder have the cache of the module which you just imported.inner the '__pycache__' have the file like 'module1.cpython-34.pyc'.this is a bytecode file also a binary file.among 'module1.cpython-34.pyc' 'cpython' means python interpreter is code by C lanugage,and python's version is 3.4.In fact,when execute python *.py,if *.py import some module,then interpreter will make these module into 'moduleName.cpython-version.pyc' as binary.if you run *.py again then just need read from 'moduleName.cpython-version.pyc',so will very save time,because as a module usual have no problem.\n\n__name__:\ninner a module you can through '__name__' judge what status when this module is running.reference module1.py\nif current module run as a independent program '__name__' is 
'__main__'\nif current module as a module run depend on other *.py,'__name__' is current module's name.\n\n__all__:is a list,reference module4.py\nif you use 'from module4 import *' import a module, inner the module you can through '__all__' store functions or classes which is can be call in main program.notice:if you use 'import module4' then '__all__' is useless.\n\n包package:package folder contain __init__.py,module5.py,module6.py...some module file,inner __init__.py you shoule define __all__=[\"module5\",\"module6\"] make these modules go into effect.when you 'from packageName import *' the __init__.py will execute all itself code,and you can through 'module5.test5()' call module's functions and classes.maybe have other use way.\n\nmodule publish,install,useage:reference module_publish.png,module_install_use.png\n'''\n#moduleName.__file__ --> return current module's path\nprint(random.__file__)#/usr/lib/python3.4/random.py\n\n#import yourself module\n#way1 recommed!\nimport module1\nmodule1.testM1()\n\n#way2 or you can:\n#from module import test1,test2\n#from module import *\n#not recommend because maybe some module have the same function name.\nfrom module2 import testM2\ntestM2()\n\n#way3 give target module a alias\nimport module3 as mm\nmm.testM3()\n\n#__all_=[\"Test\",\"test1\"] --> see annotation\nfrom module4 import *\nmyt = Test()\nmyt.test()\ntest1()\n#test2() error:because __all__ not contain 'test2'\n\n\n#包package,meybe have other use way.\nfrom package5 import *\nmodule5.test5()\n\n\n'''\nprogram's paras\nimport sys\n\npython3 base.py\nsys.argv --> ['base.py']\npython3 base.py a b c\nsys.argv --> ['base.py','a','b','c']\n\n'''\nprint(sys.argv)\n\n\n'''\nrange\n'''\n\n\n\n\n","repo_name":"yzjgogo/python","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":54245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22675116234","text":"import wandb\nimport transformer_repetition_kit as trk\nimport torch\nfrom math import gcd\nimport traceback\nimport sys\nimport argparse\nfrom tempfile import NamedTemporaryFile\n\n# Parse command line arguments\nparser = argparse.ArgumentParser(description=\"Use weights & biases to tune the transformer model.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"-d\", \"--data\", help=\"data file path\", required=True)\nparser.add_argument(\"-m\", \"--max_evals\", help=\"number of trials\", required=True)\nparser.add_argument('-s', \"--sweep_id\", help=\"Sweep ID for wandb\", required=True)\nparser.add_argument('-t', \"--error_tag\", help=\"The tag we are interested in tracking. (e.g. -, S, REP)\", required=True)\nargs = parser.parse_args()\nconfig = vars(args)\n\n# Set number of evals that we can perform\nmax_evals = int(config['max_evals'])\n\ndef lcm(a, b):\n return abs(a*b) // gcd(a, b)\n\n\ndef main():\n \"\"\"\n This is our main training routine, using weights and biases to track\n our hyperparamters, results, and suggest new hyperparemeters.\n \"\"\"\n try:\n wandb.init()\n wandb.config['data'] = config['data']\n wandb.config['error_tag'] = config['error_tag']\n\n # Set filepaths. We will create temporary files to store the data. 
This also allows\n # us to train on different hosts.\n ASR_df_filepath = config['data']\n asr_text_file = NamedTemporaryFile(mode='w', prefix='asr', suffix='.txt', delete=True, dir='temp_data')\n ttx_text_file = NamedTemporaryFile(mode='w', prefix='ttx', suffix='.txt', delete=True, dir='temp_data')\n train_file = NamedTemporaryFile(mode='w', prefix='train_sentence', suffix='.csv', delete=True, dir='temp_data')\n valid_file = NamedTemporaryFile(mode='w', prefix='valid_sentence', suffix='.csv', delete=True, dir='temp_data')\n test_file = NamedTemporaryFile(mode='w', prefix='test_sentence', suffix='.csv', delete=True, dir='temp_data')\n\n # Load data\n trk.load_data(ASR_df_filepath=ASR_df_filepath,\n train_filename=train_file.name,\n valid_filename=valid_file.name,\n test_filename=test_file.name,\n asr_text_filepath=asr_text_file.name,\n ttx_text_filepath=ttx_text_file.name)\n\n # Create tokenizer\n if wandb.config['bpe']:\n tokenizer = trk.create_train_bpe_tokenizer(wandb.config['bpe_vocab_size'],\n asr_text_filepath=asr_text_file.name,\n ttx_text_filepath=ttx_text_file.name,\n save_tokenizer=False,\n tokenizer_filename=\"./tokenizer-test.json\"\n )\n else:\n tokenizer = None\n\n # Preprocess data\n train_data, valid_data, test_data, TTX, TRG, ASR, TTX_POS, ASR_POS = trk.produce_iterators(train_file.name,\n valid_file.name,\n test_file.name,\n asr_tokenizer=tokenizer,\n ttx_tokenizer=tokenizer\n )\n\n # Close the temporary files I created earlier\n train_file.close()\n valid_file.close()\n test_file.close()\n asr_text_file.close()\n ttx_text_file.close()\n\n # Test out the tokenizer\n if wandb.config['bpe']:\n output = tokenizer.encode(\"Hello, y'all! How are you 😁 ? [WSP]\")\n print(output.tokens)\n print(output.ids)\n\n # Tell Torch that we want to use the GPU\n device = torch.device('cuda')\n\n # Update params. 
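# --- Editor's sketch (added): why the script below multiplies by
# lcm(enc_heads, dec_heads). Multi-head attention needs hid_dim divisible by
# the head count, and any multiple of the lcm is divisible by both counts.
# The head/multiplier values here are made up, not real sweep settings.
from math import gcd

def _lcm(a, b):
    return abs(a * b) // gcd(a, b)

enc_heads, dec_heads, multiplier = 6, 8, 4   # hypothetical sweep values
hid_dim = _lcm(enc_heads, dec_heads) * multiplier
assert hid_dim % enc_heads == 0 and hid_dim % dec_heads == 0
print(hid_dim)                               # 96, divisible by both 6 and 8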
This sets our hidden dimension number.\n wandb.config['hid_dim'] = lcm(\n wandb.config['enc_heads'], wandb.config['dec_heads']) * wandb.config['hid_dim_nheads_multiplier']\n\n # Train the model and get the loss\n model, train_loss, test_loss = trk.model_pipeline(device,\n train_data,\n valid_data,\n test_data,\n TTX,\n TRG,\n ASR,\n TTX_POS,\n ASR_POS,\n config['error_tag']\n )\n\n # Log that loss to Weights & Biases as a Summary metric.\n wandb.run.summary['test_loss'] = test_loss\n wandb.run.summary['train_loss'] = train_loss\n\n torch.cuda.empty_cache() # Needed so we don't kill off GPU Memory\n except Exception as e:\n print(e)\n traceback.print_exc(file=sys.stderr) # print_exc writes the traceback itself; wrapping it in print() would only print None\n raise e\n\n\nif __name__ == '__main__':\n wandb.agent(config['sweep_id'], function=main, count=max_evals,\n project=\"running_records\", entity=\"witw\")\n","repo_name":"cehrett/running_records","sub_path":"transformer_models/tune_TRK_for_BPE_with_trg_decoding_DEL.py","file_name":"tune_TRK_for_BPE_with_trg_decoding_DEL.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"38020931398","text":"\n# def combinationsSum(array, targetSum):\n \n# res = {}\n# def helper(arr, combination, total, required_sum, start):\n# if total > required_sum:\n# return\n# elif total == required_sum:\n# res[len(combination)]=combination\n# # print(self.res)\n# return\n# else:\n# for i in range(start, len(arr)):\n# helper(arr, combination + [arr[i]], total + arr[i], required_sum, i)\n# helper(sorted(array), [], 0, targetSum, 0)\n# print(res)\n# return res\n\n\n# array=[1,2,3,4,5]\n# combinationsSum(array, 6)\n\ndef combinationSum(candidates, target):\n def combination_sum(cur_ans, cur_sum, cand_idx):\n if cur_sum >= target:\n if cur_sum == target:\n if len(cur_ans) in ans:\n ans[len(cur_ans)].append(cur_ans)\n else: \n ans[len(cur_ans)]=[cur_ans]\n return\n for i in range(cand_idx, len(candidates)):\n combination_sum(cur_ans + [candidates[i]], cur_sum + candidates[i], i+1)\n ans = {}\n combination_sum([], 0, 0)\n print(ans)\n return ans\n\n\n\n\narray=[7,6,4,-1,1,2]\ncombinationSum(array,16)\ncombinationSum(array,7)\n","repo_name":"johanaluna/Code_Interviews","sub_path":"AlgoExpert/combinationSum.py","file_name":"combinationSum.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4334142020","text":"import tkinter as tk\n\nmain_window = tk.Tk()\n\n\ndef check_hand_enter():\n canvas.config(cursor=\"hand1\")\n\n\ndef check_hand_leave():\n canvas.config(cursor=\"\")\n\n\ncanvas = tk.Canvas(width=200, height=200)\ntag_name = \"polygon\"\n\ncanvas.create_polygon((25, 25), (25, 100), (125, 100), (125, 25), outline='black', fill=\"\", tag=tag_name)\n\ncanvas.tag_bind(tag_name, \"<Enter>\", lambda event: check_hand_enter())\ncanvas.tag_bind(tag_name, \"<Leave>\", lambda event: check_hand_leave())\n\ncanvas.pack()\nmain_window.mainloop()","repo_name":"RavioliGitHub/Overlay","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73278108880","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\n\nfrom hikster.core import views_website as core_views\nfrom hikster.hike import views_website as hike_views\nfrom hikster.location import views_website as
location_views\nfrom hikster.search import views_website as search_views\n\n\nurlpatterns = [\n path(\"\", core_views.HomeView.as_view(), name=\"home\"),\n path(\"api/\", include(\"hikster.urls_api\")),\n path(\"about/\", core_views.AboutView.as_view(), name=\"about\"),\n path(\"toc/\", core_views.TOCView.as_view(), name=\"toc\"),\n path(\"hikes//\", hike_views.TrailDetailView.as_view(), name=\"trail-detail\"),\n path(\n \"locations//\",\n location_views.LocationDetailView.as_view(),\n name=\"location-detail\",\n ),\n path(\"poi//\", location_views.POIDetailView.as_view(), name=\"poi-detail\"),\n path(\"results/\", search_views.SearchView.as_view(), name=\"search\"),\n]\n\nif settings.DEBUG:\n urlpatterns = (\n static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + urlpatterns\n )\n","repo_name":"genie4viz/django-vue","sub_path":"hikster/urls_website.py","file_name":"urls_website.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72594581202","text":"import boto3\nimport pandas as pd\nimport datetime\n\n\n\ndef move_safe(s3_client,source_bucket, source_obj, destination_bucket,destination_obj,owrite,exp_hash, trust_hash):\n return_dic={\n 'op': [source_bucket, source_obj,destination_bucket,destination_obj,str(owrite)],\n 'err':'',\n 'warn':'',\n 'destination_etag':'',\n 'source_etag':'',\n 'copy_ok':'False',\n 'datetime':''\n }\n err=[]\n warn=[]\n source_ok = False\n dest_permission = False\n dest_empty = False\n destination_ok = False\n copy_ok=''\n hash_mismatch=False\n preserve_hash_issue=False\n src_dest_match=False\n copy_needed=True\n cur_dest_hash=''\n\n try:\n source_head = s3_client.head_object(Bucket=source_bucket, Key=source_obj)\n source_ok = True\n return_dic['source_etag']=source_head['ETag'].strip('\"')\n if (exp_hash is not None) and not (exp_hash == return_dic['source_etag']):\n hash_mismatch=True\n warn.append(\"hash mismatch\")\n except Exception as e:\n source_ok=False\n err.append('Error getting source object:'+str(e))\n\n if ((source_bucket==destination_bucket) and (source_obj==destination_obj)):\n destination_ok = False\n cur_dest_hash=return_dic['source_etag']\n err.append(\"destination and source objects are the same! Not moving\")\n else:\n try:\n dest_head=s3_client.head_object(Bucket=destination_bucket, Key=destination_obj)\n dest_empty = False\n destination_ok = True\n dest_permission = True\n cur_dest_hash=dest_head['ETag'].strip('\"')\n if hash_mismatch and (cur_dest_hash == exp_hash):\n preserve_hash_issue = True\n err.append(\"destination object has expected hash, source does not. Not moving\")\n elif trust_hash and (cur_dest_hash==return_dic['source_etag']):\n preserve_hash_issue=True\n err.append(\"destination object has same hash as source. 
Trusting hash and not moving\")\n else:\n if owrite:\n warn.append(\"destination object already exists, but allowing overwrite\")\n else:\n err.append(\"destination object already exists, not allowing overwrite\")\n\n except Exception as e:\n if (e.response['Error']['Message'] == 'Forbidden') or (e.response['Error']['Code'] == '403'):\n err.append(str(e))\n dest_permission= False\n elif (e.response['Error']['Message'] == 'Not Found') or (e.response['Error']['Code'] == '404'):\n dest_empty = True\n dest_permission = True\n destination_ok = True\n else:\n destination_ok = False\n\n if source_ok and destination_ok and (dest_empty or owrite) and dest_permission and not preserve_hash_issue:\n try:\n s3_client.copy_object(Bucket=destination_bucket, Key=destination_obj, CopySource={'Bucket':source_bucket, 'Key':source_obj})\n destination_head = s3_client.head_object(Bucket=destination_bucket, Key=destination_obj)\n return_dic['destination_etag'] = destination_head['ETag'].strip('\"')\n copy_ok=False\n if (return_dic['destination_etag'] == return_dic['source_etag']):\n copy_ok = True\n else:\n err.append(\"etag different after moving. Retaining original and new\")\n copy_ok = False\n if copy_ok:\n s3_client.delete_object(Bucket=source_bucket, Key=source_obj)\n else:\n #s3_client.delete_object(Bucket=destination_bucket, Key=destination_obj)\n pass\n except Exception as e:\n err.append(str(e))\n\n elif not dest_empty:\n warn.append(\"did not move but destination object currently exists\")\n return_dic['destination_etag'] = cur_dest_hash\n if (len(err)>0):\n errStr=\"||\".join(err)\n return_dic['err']=errStr\n if (len(warn)>0):\n warnStr=\"||\".join(warn)\n return_dic['warn']=warnStr\n return_dic['copy_ok']=str(copy_ok)\n return_dic['datetime']=str(datetime.datetime.now())\n return return_dic\n\nif __name__ == '__main__':\n aws_key_file = \"../secure_files/aws_key.csv\"\n key_data = pd.read_csv(aws_key_file).iloc[0]\n access_key = key_data['Access key ID']\n secret_key = key_data['Secret access key']\n region = 'us-east-1'\n\n s3_client = boto3.client('s3', region_name=region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)\n ret1=move_safe(s3_client, 'gw-new-test','testmv1/recipe1.jpg','gw-new-test','testmv1/recipe2.jpg',False,None, False)\n print(ret1)\n ret2=move_safe(s3_client, 'gw-new-test', 'testmv1/recipe1.jpg', 'gw-new-test','testmv1/recipe2.jpg',True,None, False)\n print(ret2)\n","repo_name":"ImagingDataCommons/aws_etl","sub_path":"reorg/move_safe.py","file_name":"move_safe.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43312532616","text":"'''\nCheck the number of data in velocity and distance files obtained by Stage simulator given a directory and the prefix of the subdirectory containing the files.\n'''\n\nimport sys\nimport numpy as np\nimport glob\nimport linecache\n\nnRobotsStart = 20\nnRobotsEnd = 300\nnRobotsInc = 20\nnumTests = 40\n\nif len(sys.argv) < 3:\n print(\"Usage:\")\n print(\"python3 \" + sys.argv[0] + \" [container directory] [subdirectory prefix]\")\n exit()\n\ncontainer_dir = sys.argv[1]\nsubdir_prefix = sys.argv[2]\n\nfor n in range(nRobotsStart,nRobotsEnd+1,nRobotsInc):\n for l in range(numTests):\n logFile = container_dir+'/'+subdir_prefix+str(n)+'/log_'+str(l)\n regExForGlobPrefix = container_dir+'/'+subdir_prefix+str(n)+'/log_'+str(l)+'_robot*'\n \n # get variables from log\n minDistance = float(linecache.getline(logFile,12))\n maxVelocity = 
float(linecache.getline(logFile,13))\n meanDistance = float(linecache.getline(logFile,14))\n sqrtvarDistance = float(linecache.getline(logFile,15))\n nDistance = int(linecache.getline(logFile,16))\n meanVelocity = float(linecache.getline(logFile,17))\n sqrtvarVelocity = float(linecache.getline(logFile,18))\n nVelocity = int(linecache.getline(logFile,19))\n \n Compare1 = (minDistance, maxVelocity, meanDistance, sqrtvarDistance, nDistance, meanVelocity, sqrtvarVelocity, nVelocity)\n \n #get velocities from every robot\n regExForGlob = regExForGlobPrefix+'_v'\n A = [float(a) for f in glob.glob(regExForGlob) for a in open(f).read().splitlines()] \n meanVelRobots, stdVelRobots, maxVelRobots, lenVelRobots = np.mean(A), np.sqrt(np.var(A)), max(A), len(A)\n del A\n \n #get distances from every robot\n regExForGlob = regExForGlobPrefix+'_d'\n A = [float(a) for f in glob.glob(regExForGlob) for a in open(f).read().splitlines()] \n meanDistRobots, stdDistRobots, minDistRobots, lenDistRobots = np.mean(A), np.sqrt(np.var(A)), min(A), len(A)\n del A\n \n Compare2 = (minDistRobots, maxVelRobots, meanDistRobots, stdDistRobots, lenDistRobots, meanVelRobots, stdVelRobots, lenVelRobots)\n \n if max([abs(c1 - c2) for c1, c2 in zip(Compare1, Compare2)]) > 1e-05: \n print(logFile)\n print(Compare1)\n print(Compare2)\n","repo_name":"yuri-tavares/swarm-common-target-area-congestion","sub_path":"common/checkVelocityAndDistanceFiles.py","file_name":"checkVelocityAndDistanceFiles.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8093642981","text":"import numpy as np\nimport pytest\nfrom numpy.testing import assert_almost_equal\nimport cupy\n\nfrom dipy.core import geometry\nfrom dipy.align import floating\nfrom dipy.align import imwarp\nfrom dipy.align import vector_fields as vfu\nfrom dipy.align.transforms import regtransforms\nfrom dipy.align.parzenhist import sample_domain_regular\n\n\nfrom cudipy.align import (\n compose_vector_fields,\n gradient,\n invert_vector_field_fixed_point,\n reorient_vector_field,\n sparse_gradient,\n transform_affine,\n warp,\n)\n\n\npytest.importorskip(\"dipy\")\npytest.importorskip(\"nibabel\")\n\n\n@pytest.mark.parametrize(\"shape\", [(32, 48), (96, 64, 32), (64, 64, 64)])\ndef test_warp(shape):\n \"\"\"Tests the cython implementation of the 3d warpings against scipy.\"\"\"\n\n ndim = len(shape)\n radius = shape[0] / 3\n\n if ndim == 3:\n # Create an image of a sphere\n volume = vfu.create_sphere(*shape, radius)\n volume = np.array(volume, dtype=floating)\n\n # Create a displacement field for warping\n d, dinv = vfu.create_harmonic_fields_3d(*shape, 0.2, 8)\n else:\n # Create an image of a circle\n volume = vfu.create_circle(*shape, radius)\n volume = np.array(volume, dtype=floating)\n\n # Create a displacement field for warping\n d, dinv = vfu.create_harmonic_fields_2d(*shape, 0.2, 8)\n d = np.asarray(d).astype(floating)\n\n if ndim == 3:\n # Select an arbitrary rotation axis\n axis = np.array([0.5, 2.0, 1.5])\n # Select an arbitrary translation matrix\n t = 0.1\n trans = np.array(\n [\n [1, 0, 0, -t * shape[0]],\n [0, 1, 0, -t * shape[1]],\n [0, 0, 1, -t * shape[2]],\n [0, 0, 0, 1],\n ]\n )\n trans_inv = np.linalg.inv(trans)\n theta = np.pi / 5\n s = 1.1\n rot = np.zeros(shape=(4, 4))\n rot[:3, :3] = geometry.rodrigues_axis_rotation(axis, theta)\n rot[3, 3] = 1.0\n\n scale = np.array(\n [[1 * s, 0, 0, 0], [0, 1 * s, 0, 0], [0, 0, 1 * s, 0], [0, 0, 0, 1]]\n )\n elif ndim == 2:\n 
# Select an arbitrary translation matrix\n t = 0.1\n trans = np.array(\n [[1, 0, -t * shape[0]], [0, 1, -t * shape[1]], [0, 0, 1]]\n )\n trans_inv = np.linalg.inv(trans)\n theta = -1 * np.pi / 6.0\n s = 0.42\n ct = np.cos(theta)\n st = np.sin(theta)\n\n rot = np.array([[ct, -st, 0], [st, ct, 0], [0, 0, 1]])\n\n scale = np.array([[1 * s, 0, 0], [0, 1 * s, 0], [0, 0, 1]])\n\n aff = trans_inv.dot(scale.dot(rot.dot(trans)))\n\n # Select arbitrary (but different) grid-to-space transforms\n sampling_grid2world = scale\n field_grid2world = aff\n field_world2grid = np.linalg.inv(field_grid2world)\n image_grid2world = aff.dot(scale)\n image_world2grid = np.linalg.inv(image_grid2world)\n\n A = field_world2grid.dot(sampling_grid2world)\n B = image_world2grid.dot(sampling_grid2world)\n C = image_world2grid\n\n # Reorient the displacement field according to its grid-to-space\n # transform\n dcopy = np.copy(d)\n if ndim == 3:\n vfu.reorient_vector_field_3d(dcopy, field_grid2world)\n expected = vfu.warp_3d(\n volume, dcopy, A, B, C, np.array(shape, dtype=np.int32)\n )\n elif ndim == 2:\n vfu.reorient_vector_field_2d(dcopy, field_grid2world)\n expected = vfu.warp_2d(\n volume, dcopy, A, B, C, np.array(shape, dtype=np.int32)\n )\n\n dcopyg = cupy.asarray(dcopy)\n volumeg = cupy.asarray(volume)\n Ag = cupy.asarray(A)\n Bg = cupy.asarray(B)\n Cg = cupy.asarray(C)\n\n warped = warp(volumeg, dcopyg, Ag, Bg, Cg, order=1, mode=\"grid-constant\")\n\n cupy.testing.assert_array_almost_equal(warped, expected, decimal=4)\n\n\n@pytest.mark.parametrize(\n \"d_shape, codomain_shape, order\",\n [\n [(64, 64), (80, 80), 1],\n [(64, 64), (80, 80), 0],\n [(64, 64, 64), (80, 80, 80), 1],\n [(64, 64, 64), (80, 80, 80), 0],\n ],\n)\ndef test_transform_affine(d_shape, codomain_shape, order):\n\n ndim = len(d_shape)\n theta = -1 * np.pi / 5.0\n s = 0.5\n ct = np.cos(theta)\n st = np.sin(theta)\n\n if ndim == 2:\n # Create an image of a circle\n radius = d_shape[0] // 4\n volume = vfu.create_circle(*codomain_shape, radius)\n volume = np.array(volume, dtype=floating)\n\n # Generate affine transforms\n t = 0.3\n trans = np.array(\n [[1, 0, -t * d_shape[0]], [0, 1, -t * d_shape[1]], [0, 0, 1]]\n )\n trans_inv = np.linalg.inv(trans)\n rot = np.array([[ct, -st, 0], [st, ct, 0], [0, 0, 1]])\n\n scale = np.array([[1 * s, 0, 0], [0, 1 * s, 0], [0, 0, 1]])\n elif ndim == 3:\n # Create an image of a sphere\n radius = d_shape[0] // 4\n volume = vfu.create_sphere(*codomain_shape, radius)\n volume = np.array(volume, dtype=floating)\n\n # Generate affine transforms\n # Select an arbitrary rotation axis\n axis = np.array([0.5, 2.0, 1.5])\n t = 0.3\n trans = np.array(\n [\n [1, 0, 0, -t * d_shape[0]],\n [0, 1, 0, -t * d_shape[1]],\n [0, 0, 1, -t * d_shape[2]],\n [0, 0, 0, 1],\n ]\n )\n trans_inv = np.linalg.inv(trans)\n\n rot = np.zeros(shape=(4, 4))\n rot[:3, :3] = geometry.rodrigues_axis_rotation(axis, theta)\n rot[3, 3] = 1.0\n\n scale = np.array(\n [[1 * s, 0, 0, 0], [0, 1 * s, 0, 0], [0, 0, 1 * s, 0], [0, 0, 0, 1]]\n )\n gt_affine = trans_inv.dot(scale.dot(rot.dot(trans)))\n\n # # Apply the affine transform to the grid coordinates\n # Y = np.apply_along_axis(gt_affine.dot, 0, X)[0:2, ...]\n\n # expected = map_coordinates(volume, Y, order=1)\n if order == 1:\n if ndim == 2:\n dipy_func = vfu.transform_2d_affine\n elif ndim == 3:\n dipy_func = vfu.transform_3d_affine\n elif order == 0:\n if ndim == 2:\n dipy_func = vfu.transform_2d_affine_nn\n elif ndim == 3:\n dipy_func = vfu.transform_3d_affine_nn\n expected = dipy_func(volume, 
np.array(d_shape, dtype=np.int32), gt_affine)\n\n volumed = cupy.asarray(volume)\n warped = transform_affine(volumed, d_shape, gt_affine, order=order)\n\n cupy.testing.assert_array_almost_equal(warped, expected)\n\n\n@pytest.mark.parametrize(\"shape\", [(10, 10), (10, 10, 10)])\ndef test_compose_vector_fields(shape):\n r\"\"\"\n Creates two random displacement field that exactly map pixels from an input\n image to an output image. The resulting displacements and their\n composition, although operating in physical space, map the points exactly\n (up to numerical precision).\n \"\"\"\n np.random.seed(8315759)\n input_shape = shape\n tgt_sh = shape\n ndim = len(shape)\n if ndim == 3:\n # create a simple affine transformation\n ns = input_shape[0]\n nr = input_shape[1]\n nc = input_shape[2]\n s = 1.5\n t = 2.5\n trans = np.array(\n [\n [1, 0, 0, -t * ns],\n [0, 1, 0, -t * nr],\n [0, 0, 1, -t * nc],\n [0, 0, 0, 1],\n ]\n )\n trans_inv = np.linalg.inv(trans)\n scale = np.array(\n [[1 * s, 0, 0, 0], [0, 1 * s, 0, 0], [0, 0, 1 * s, 0], [0, 0, 0, 1]]\n )\n dipy_func = vfu.compose_vector_fields_3d\n dipy_create_func = vfu.create_random_displacement_3d\n elif ndim == 2:\n # create a simple affine transformation\n nr = input_shape[0]\n nc = input_shape[1]\n s = 1.5\n t = 2.5\n trans = np.array([[1, 0, -t * nr], [0, 1, -t * nc], [0, 0, 1]])\n trans_inv = np.linalg.inv(trans)\n scale = np.array([[1 * s, 0, 0], [0, 1 * s, 0], [0, 0, 1]])\n dipy_func = vfu.compose_vector_fields_2d\n dipy_create_func = vfu.create_random_displacement_2d\n\n gt_affine = trans_inv.dot(scale.dot(trans))\n\n # create two random displacement fields\n input_grid2world = gt_affine\n target_grid2world = gt_affine\n\n disp1, assign1 = dipy_create_func(\n np.array(input_shape, dtype=np.int32),\n input_grid2world,\n np.array(tgt_sh, dtype=np.int32),\n target_grid2world,\n )\n disp1 = np.array(disp1, dtype=floating)\n assign1 = np.array(assign1)\n\n disp2, assign2 = dipy_create_func(\n np.array(input_shape, dtype=np.int32),\n input_grid2world,\n np.array(tgt_sh, dtype=np.int32),\n target_grid2world,\n )\n disp2 = np.array(disp2, dtype=floating)\n assign2 = np.array(assign2)\n\n # create a random image (with decimal digits) to warp\n moving_image = np.empty(tgt_sh, dtype=floating)\n moving_image[...] 
= np.random.randint(0, 10, np.size(moving_image)).reshape(\n tuple(tgt_sh)\n )\n # set boundary values to zero so we don't test wrong interpolation due to\n # floating point precision\n if ndim == 3:\n moving_image[0, :, :] = 0\n moving_image[-1, :, :] = 0\n moving_image[:, 0, :] = 0\n moving_image[:, -1, :] = 0\n moving_image[:, :, 0] = 0\n moving_image[:, :, -1] = 0\n # evaluate the composed warping using the exact assignments\n # (first 1 then 2)\n\n warp1 = moving_image[\n (assign2[..., 0], assign2[..., 1], assign2[..., 2])\n ]\n expected = warp1[(assign1[..., 0], assign1[..., 1], assign1[..., 2])]\n\n elif ndim == 2:\n moving_image[0, :] = 0\n moving_image[-1, :] = 0\n moving_image[:, 0] = 0\n moving_image[:, -1] = 0\n # evaluate the composed warping using the exact assignments\n # (first 1 then 2)\n\n warp1 = moving_image[(assign2[..., 0], assign2[..., 1])]\n expected = warp1[(assign1[..., 0], assign1[..., 1])]\n\n # compose the displacement fields\n target_world2grid = np.linalg.inv(target_grid2world)\n premult_index = target_world2grid.dot(input_grid2world)\n premult_disp = target_world2grid\n\n disp1d = cupy.asarray(disp1)\n disp2d = cupy.asarray(disp2)\n premult_indexd = cupy.asarray(premult_index)\n premult_dispd = cupy.asarray(premult_disp)\n moving_imaged = cupy.asarray(moving_image)\n\n for time_scaling in [0.25, 1.0, 4.0]:\n composition, stats = dipy_func(\n disp1,\n disp2 / time_scaling,\n premult_index,\n premult_disp,\n time_scaling,\n None,\n )\n compositiond, statsd = compose_vector_fields(\n disp1d,\n disp2d / time_scaling,\n premult_indexd,\n premult_dispd,\n time_scaling,\n None,\n )\n cupy.testing.assert_array_almost_equal(composition, compositiond)\n cupy.testing.assert_array_almost_equal(stats, statsd)\n\n for order in [0, 1]:\n warped = warp(\n moving_imaged,\n compositiond,\n None,\n premult_indexd,\n premult_dispd,\n order=order,\n )\n cupy.testing.assert_array_almost_equal(warped, expected)\n\n # test updating the displacement field instead of creating a new one\n compositiond = disp1d.copy()\n compose_vector_fields(\n compositiond,\n disp2d / time_scaling,\n premult_indexd,\n premult_dispd,\n time_scaling,\n compositiond,\n )\n\n for order in [0, 1]:\n warped = warp(\n moving_imaged,\n compositiond,\n None,\n premult_indexd,\n premult_dispd,\n order=order,\n )\n cupy.testing.assert_array_almost_equal(warped, expected)\n\n # Test non-overlapping case\n if ndim == 3:\n x_0 = np.asarray(range(input_shape[0]))\n x_1 = np.asarray(range(input_shape[1]))\n x_2 = np.asarray(range(input_shape[2]))\n X = np.empty(input_shape + (3,), dtype=np.float64)\n O = np.ones(input_shape)\n X[..., 0] = x_0[:, None, None] * O\n X[..., 1] = x_1[None, :, None] * O\n X[..., 2] = x_2[None, None, :] * O\n sz = input_shape[0] * input_shape[1] * input_shape[2] * 3\n random_labels = np.random.randint(0, 2, sz)\n random_labels = random_labels.reshape(input_shape + (3,))\n elif ndim == 2:\n # Test non-overlapping case\n x_0 = np.asarray(range(input_shape[0]))\n x_1 = np.asarray(range(input_shape[1]))\n X = np.empty(input_shape + (2,), dtype=np.float64)\n O = np.ones(input_shape)\n X[..., 0] = x_0[:, None] * O\n X[..., 1] = x_1[None, :] * O\n random_labels = np.random.randint(\n 0, 2, input_shape[0] * input_shape[1] * 2\n )\n random_labels = random_labels.reshape(input_shape + (2,))\n values = np.array([-1, tgt_sh[0]])\n disp1 = (values[random_labels] - X).astype(floating)\n disp1d = cupy.asarray(disp1)\n disp2d = cupy.asarray(disp2)\n composition, stats = compose_vector_fields(\n disp1d, 
disp2d, None, None, 1.0, None\n )\n cupy.testing.assert_array_almost_equal(\n composition, cupy.zeros_like(composition)\n )\n\n # test updating the displacement field instead of creating a new one\n compositiond = disp1d.copy()\n compose_vector_fields(compositiond, disp2d, None, None, 1.0, compositiond)\n cupy.testing.assert_array_almost_equal(\n compositiond, cupy.zeros_like(composition)\n )\n\n # TODO: resolve difference with DiPy by raising an error for the commented\n # cases below? Currently second array allows 3x3\n # Test exception is raised when the affine transform matrix is not valid\n # if ndim == 3:\n # valid = cupy.zeros((3, 4), dtype=cupy.float64)\n # invalid = cupy.zeros((3, 3), dtype=cupy.float64)\n # elif ndim == 2:\n # valid = cupy.zeros((2, 3), dtype=cupy.float64)\n # invalid = cupy.zeros((2, 2), dtype=cupy.float64)\n # with pytest.raises(ValueError):\n # compose_vector_fields(disp1d, disp2d, invalid, valid, 1.0, None)\n # with pytest.raises(ValueError):\n # compose_vector_fields(disp1d, disp2d, valid, invalid, 1.0, None)\n\n\n@pytest.mark.parametrize(\"shape\", [(64, 64), (64, 64, 64)])\ndef test_invert_vector_field(shape):\n r\"\"\"\n Inverts a synthetic, analytically invertible, displacement field\n \"\"\"\n ndim = len(shape)\n if ndim == 3:\n ns = shape[0]\n nr = shape[1]\n nc = shape[2]\n\n # Create an arbitrary image-to-space transform\n\n # Select an arbitrary rotation axis\n axis = np.array([2.0, 0.5, 1.0])\n t = 2.5 # translation factor\n\n trans = np.array(\n [\n [1, 0, 0, -t * ns],\n [0, 1, 0, -t * nr],\n [0, 0, 1, -t * nc],\n [0, 0, 0, 1],\n ]\n )\n dipy_create_func = vfu.create_harmonic_fields_3d\n dipy_reorient_func = vfu.reorient_vector_field_3d\n dipy_invert_func = vfu.invert_vector_field_fixed_point_3d\n elif ndim == 2:\n nr = shape[0]\n nc = shape[1]\n # Create an arbitrary image-to-space transform\n t = 2.5 # translation factor\n\n trans = np.array([[1, 0, -t * nr], [0, 1, -t * nc], [0, 0, 1]])\n dipy_create_func = vfu.create_harmonic_fields_2d\n dipy_reorient_func = vfu.reorient_vector_field_2d\n dipy_invert_func = vfu.invert_vector_field_fixed_point_2d\n\n trans_inv = np.linalg.inv(trans)\n\n d, _ = dipy_create_func(*shape, 0.2, 8)\n d = np.asarray(d).astype(floating)\n\n for theta in [-1 * np.pi / 5.0, 0.0, np.pi / 5.0]: # rotation angle\n for s in [0.5, 1.0, 2.0]: # scale\n if ndim == 3:\n rot = np.zeros(shape=(4, 4))\n rot[:3, :3] = geometry.rodrigues_axis_rotation(axis, theta)\n rot[3, 3] = 1.0\n scale = np.array(\n [\n [1 * s, 0, 0, 0],\n [0, 1 * s, 0, 0],\n [0, 0, 1 * s, 0],\n [0, 0, 0, 1],\n ]\n )\n elif ndim == 2:\n ct = np.cos(theta)\n st = np.sin(theta)\n\n rot = np.array([[ct, -st, 0], [st, ct, 0], [0, 0, 1]])\n\n scale = np.array([[1 * s, 0, 0], [0, 1 * s, 0], [0, 0, 1]])\n\n gt_affine = trans_inv.dot(scale.dot(rot.dot(trans)))\n gt_affine_inv = np.linalg.inv(gt_affine)\n dcopy = np.copy(d)\n\n dcopyd = cupy.asarray(dcopy)\n gt_affined = cupy.asarray(gt_affine)\n gt_affine_invd = cupy.asarray(gt_affine_inv)\n\n # make sure the field remains invertible after the re-mapping\n dipy_reorient_func(dcopy, gt_affine)\n\n # TODO: can't do in-place computation unless out= is supplied and\n # dcopy has the dimensions axis first instead of last\n dcopyd = reorient_vector_field(dcopyd, gt_affined)\n cupy.testing.assert_array_almost_equal(dcopyd, dcopy, decimal=4)\n\n # Note: the spacings are used just to check convergence, so they\n # don't need to be very accurate. 
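# --- Editor's sketch (added): a self-contained 1-D illustration of the
# fixed-point displacement-field inversion these tests exercise. This is an
# assumed toy version, not cudipy's actual kernel: iterate
# g <- -d(x + g) so that warping by d and then by g is ~ the identity.
import numpy as np

def invert_displacement_1d(d, n_iter=50):
    x = np.arange(d.size, dtype=float)
    g = np.zeros_like(d)
    for _ in range(n_iter):
        g = -np.interp(x + g, x, d)   # evaluate d at the warped positions
    return g

d = 0.3 * np.sin(np.linspace(0, 2 * np.pi, 64))   # smooth, invertible field
g = invert_displacement_1d(d)
x = np.arange(64, dtype=float)
residual = d + np.interp(x + d, x, g)             # compose d with its inverse
print(float(np.abs(residual[5:-5]).max()))        # ~0 away from the borders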
Here we are passing (0.5 * s) to\n # force the algorithm to make more iterations: in ANTS, there is a\n # hard-coded bound on the maximum residual, that's why we cannot\n # force more iteration by changing the parameters.\n # We will investigate this issue with more detail in the future.\n\n if False:\n from cupyx.time import repeat\n\n perf = repeat(\n invert_vector_field_fixed_point,\n (\n dcopyd,\n gt_affine_invd,\n cupy.asarray([s, s, s]) * 0.5,\n 40,\n 1e-7,\n ),\n n_warmup=20,\n n_repeat=80,\n )\n print(perf)\n perf = repeat(\n dipy_invert_func,\n (\n dcopy,\n gt_affine_inv,\n np.asarray([s, s, s]) * 0.5,\n 40,\n 1e-7,\n ),\n n_warmup=0,\n n_repeat=8,\n )\n print(perf)\n # if False:\n # from pyvolplot import volshow\n # from matplotlib import pyplot as plt\n # inv_approx, q, norms, tmp1, tmp2, epsilon, maxlen = vfu.invert_vector_field_fixed_point_3d_debug(\n # dcopy, gt_affine_inv, np.array([s, s, s]) * 0.5, max_iter=1, tol=1e-7\n # )\n # inv_approxd, qd, normsd, tmp1d, tmp2d, epsilond, maxlend = invert_vector_field_fixed_point(\n # dcopyd, gt_affine_invd, cupy.asarray([s, s, s]) * 0.5, max_iter=1, tol=1e-7\n # )\n inv_approxd = invert_vector_field_fixed_point(\n dcopyd, gt_affine_invd, cupy.asarray([s, s, s]) * 0.5, 40, 1e-7\n )\n\n if False:\n inv_approx = dipy_invert_func(\n dcopy, gt_affine_inv, np.array([s, s, s]) * 0.5, 40, 1e-7\n )\n cupy.testing.assert_allclose(\n inv_approx, inv_approxd, rtol=1e-2, atol=1e-2\n )\n\n # TODO: use GPU-based imwarp here once implemented\n mapping = imwarp.DiffeomorphicMap(ndim, shape, gt_affine)\n mapping.forward = dcopy\n mapping.backward = inv_approxd.get()\n residual, stats = mapping.compute_inversion_error()\n assert_almost_equal(stats[1], 0, decimal=3)\n assert_almost_equal(stats[2], 0, decimal=3)\n\n # # Test exception is raised when the affine transform matrix is not valid\n # invalid = cupy.zeros((3, 3), dtype=np.float64)\n # spacing = cupy.asarray([1.0, 1.0, 1.0])\n # with pytest.raises(ValueError):\n # invert_vector_field_fixed_point(dcopyd, invalid, spacing, 40, 1e-7, None)\n\n\ndef test_gradient_2d():\n np.random.seed(3921116)\n sh = (25, 32)\n # Create grid coordinates\n x_0 = np.arange(sh[0])\n x_1 = np.arange(sh[1])\n X = np.empty(sh + (3,), dtype=np.float64)\n O = np.ones(sh)\n X[..., 0] = x_0[:, None] * O\n X[..., 1] = x_1[None, :] * O\n X[..., 2] = 1\n\n transform = regtransforms[(\"RIGID\", 2)]\n theta = np.array([0.1, 5.0, 2.5])\n T = transform.param_to_matrix(theta)\n TX = X.dot(T.T)\n # Eval an arbitrary (known) function at TX\n # f(x, y) = ax^2 + bxy + cy^{2}\n # df/dx = 2ax + by\n # df/dy = 2cy + bx\n a = 2e-3\n b = 5e-3\n c = 7e-3\n img = (\n a * TX[..., 0] ** 2 + b * TX[..., 0] * TX[..., 1] + c * TX[..., 1] ** 2\n )\n img = img.astype(floating)\n # img is an image sampled at X with grid-to-space transform T\n\n # Test sparse gradient: choose some sample points (in space)\n sample = sample_domain_regular(20, np.array(sh, dtype=np.int32), T)\n sample = np.array(sample)\n # Compute the analytical gradient at all points\n expected = np.empty((sample.shape[0], 2), dtype=floating)\n expected[..., 0] = 2 * a * sample[:, 0] + b * sample[:, 1]\n expected[..., 1] = 2 * c * sample[:, 1] + b * sample[:, 0]\n # Get the numerical gradient with the implementation under test\n sp_to_grid = np.linalg.inv(T)\n img_spacing = np.ones(2)\n\n img_d = cupy.asarray(img)\n img_spacing_d = cupy.asarray(img_spacing)\n sp_to_grid_d = cupy.asarray(sp_to_grid)\n sample_d = cupy.asarray(sample)\n\n actual, inside = vfu.sparse_gradient(img, 
sp_to_grid, img_spacing, sample)\n actual_gpu, inside_gpu = sparse_gradient(\n img_d, sp_to_grid_d, img_spacing_d, sample_d.T\n )\n atol = rtol = 1e-5\n cupy.testing.assert_allclose(\n actual * inside[..., np.newaxis],\n actual_gpu * inside_gpu[..., np.newaxis],\n atol=atol,\n rtol=rtol,\n )\n cupy.testing.assert_array_equal(inside, inside_gpu)\n\n # TODO: verify exceptions\n # # Verify exception is raised when passing invalid affine or spacings\n # invalid_affine = np.eye(2)\n # invalid_spacings = np.ones(1)\n # assert_raises(ValueError, vfu.sparse_gradient, img, invalid_affine,\n # img_spacing, sample)\n # assert_raises(ValueError, vfu.sparse_gradient, img, sp_to_grid,\n # invalid_spacings, sample)\n\n # Test dense gradient\n # Compute the analytical gradient at all points\n expected = np.empty(sh + (2,), dtype=floating)\n expected[..., 0] = 2 * a * TX[..., 0] + b * TX[..., 1]\n expected[..., 1] = 2 * c * TX[..., 1] + b * TX[..., 0]\n # Get the numerical gradient with the implementation under test\n sp_to_grid = np.linalg.inv(T)\n img_spacing = np.ones(2)\n\n actual, inside = vfu.gradient(img, sp_to_grid, img_spacing, sh, T)\n sp_to_grid_d = cupy.asarray(sp_to_grid)\n img_spacing_d = cupy.asarray(img_spacing)\n T_d = cupy.asarray(T)\n actual_gpu, inside_gpu = gradient(\n img_d, sp_to_grid_d, img_spacing_d, sh, T_d\n )\n\n atol = rtol = 1e-5\n cupy.testing.assert_allclose(\n actual * inside[..., np.newaxis],\n actual_gpu * inside_gpu[..., np.newaxis],\n atol=atol,\n rtol=rtol,\n )\n cupy.testing.assert_array_equal(inside, inside_gpu)\n\n # In the dense case, we are evaluating at the exact points (sample points\n # are not slightly moved like in the sparse case) so we have more precision\n\n # TODO: verify exceptions\n # # Verify exception is raised when passing invalid affine or spacings\n # assert_raises(ValueError, vfu.gradient, img, invalid_affine, img_spacing,\n # sh, T)\n # assert_raises(ValueError, vfu.gradient, img, sp_to_grid, img_spacing,\n # sh, invalid_affine)\n # assert_raises(ValueError, vfu.gradient, img, sp_to_grid, invalid_spacings,\n # sh, T)\n\n\ndef test_gradient_3d():\n np.random.seed(3921116)\n shape = (25, 32, 15)\n # Create grid coordinates\n x_0 = np.asarray(range(shape[0]))\n x_1 = np.asarray(range(shape[1]))\n x_2 = np.asarray(range(shape[2]))\n X = np.zeros(shape + (4,), dtype=np.float64)\n O = np.ones(shape)\n X[..., 0] = x_0[:, None, None] * O\n X[..., 1] = x_1[None, :, None] * O\n X[..., 2] = x_2[None, None, :] * O\n X[..., 3] = 1\n\n transform = regtransforms[(\"RIGID\", 3)]\n theta = np.array([0.1, 0.05, 0.12, -12.0, -15.5, -7.2])\n T = transform.param_to_matrix(theta)\n\n TX = X.dot(T.T)\n # Eval an arbitrary (known) function at TX\n # f(x, y, z) = ax^2 + by^2 + cz^2 + dxy + exz + fyz\n # df/dx = 2ax + dy + ez\n # df/dy = 2by + dx + fz\n # df/dz = 2cz + ex + fy\n a, b, c = 2e-3, 3e-3, 1e-3\n d, e, f = 1e-3, 2e-3, 3e-3\n img = (\n a * TX[..., 0] ** 2\n + b * TX[..., 1] ** 2\n + c * TX[..., 2] ** 2\n + d * TX[..., 0] * TX[..., 1]\n + e * TX[..., 0] * TX[..., 2]\n + f * TX[..., 1] * TX[..., 2]\n )\n\n img = img.astype(floating)\n # Test sparse gradient: choose some sample points (in space)\n sample = sample_domain_regular(100, np.array(shape, dtype=np.int32), T)\n sample = np.array(sample)\n # Compute the analytical gradient at all points\n expected = np.empty((sample.shape[0], 3), dtype=floating)\n expected[..., 0] = (\n 2 * a * sample[:, 0] + d * sample[:, 1] + e * sample[:, 2]\n )\n expected[..., 1] = (\n 2 * b * sample[:, 1] + d * sample[:, 0] + f * 
sample[:, 2]\n )\n expected[..., 2] = (\n 2 * c * sample[:, 2] + e * sample[:, 0] + f * sample[:, 1]\n )\n # Get the numerical gradient with the implementation under test\n sp_to_grid = np.linalg.inv(T)\n img_spacing = np.ones(3)\n actual, inside = vfu.sparse_gradient(img, sp_to_grid, img_spacing, sample)\n\n img_d = cupy.asarray(img)\n img_spacing_d = cupy.asarray(img_spacing)\n sp_to_grid_d = cupy.asarray(sp_to_grid)\n sample_d = cupy.asarray(sample)\n actual_gpu, inside_gpu = sparse_gradient(\n img_d, sp_to_grid_d, img_spacing_d, sample_d.T\n )\n atol = rtol = 1e-5\n cupy.testing.assert_allclose(\n actual * inside[..., np.newaxis],\n actual_gpu * inside_gpu[..., np.newaxis],\n atol=atol,\n rtol=rtol,\n )\n cupy.testing.assert_array_equal(inside, inside_gpu)\n\n # TODO: test invalid inputs\n # # Verify exception is raised when passing invalid affine or spacings\n # invalid_affine = np.eye(3)\n # invalid_spacings = np.ones(2)\n # assert_raises(ValueError, vfu.sparse_gradient, img, invalid_affine,\n # img_spacing, sample)\n # assert_raises(ValueError, vfu.sparse_gradient, img, sp_to_grid,\n # invalid_spacings, sample)\n\n # Test dense gradient\n # Compute the analytical gradient at all points\n expected = np.empty(shape + (3,), dtype=floating)\n expected[..., 0] = 2 * a * TX[..., 0] + d * TX[..., 1] + e * TX[..., 2]\n expected[..., 1] = 2 * b * TX[..., 1] + d * TX[..., 0] + f * TX[..., 2]\n expected[..., 2] = 2 * c * TX[..., 2] + e * TX[..., 0] + f * TX[..., 1]\n # Get the numerical gradient with the implementation under test\n sp_to_grid = np.linalg.inv(T)\n img_spacing = np.ones(3)\n actual, inside = vfu.gradient(img, sp_to_grid, img_spacing, shape, T)\n\n sp_to_grid_d = cupy.asarray(sp_to_grid)\n img_spacing_d = cupy.asarray(img_spacing)\n T_d = cupy.asarray(T)\n actual_gpu, inside_gpu = gradient(\n img_d, sp_to_grid_d, img_spacing_d, shape, T_d\n )\n\n atol = rtol = 1e-5\n cupy.testing.assert_allclose(\n actual * inside[..., np.newaxis],\n actual_gpu * inside_gpu[..., np.newaxis],\n atol=atol,\n rtol=rtol,\n )\n cupy.testing.assert_array_equal(inside, inside_gpu)\n\n # TODO: test invalid inputs\n # # In the dense case, we are evaluating at the exact points (sample points\n # # are not slightly moved like in the sparse case) so we have more precision\n # assert_equal(diff.max() < 1e-5, True)\n # # Verify exception is raised when passing invalid affine or spacings\n # assert_raises(ValueError, vfu.gradient, img, invalid_affine, img_spacing,\n # shape, T)\n # assert_raises(ValueError, vfu.gradient, img, sp_to_grid, img_spacing,\n # shape, invalid_affine)\n # assert_raises(ValueError, vfu.gradient, img, sp_to_grid, invalid_spacings,\n # shape, T)\n","repo_name":"dipy/cudipy","sub_path":"cudipy/align/tests/test_vector_fields.py","file_name":"test_vector_fields.py","file_ext":"py","file_size_in_byte":28621,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"71442923282","text":"from entities.user import User\nimport re\n\nclass UserRepository:\n def __init__(self):\n self._users = []\n\n def find_all(self):\n return self._users\n\n def find_by_username(self, username):\n users = self.find_all()\n\n users_with_username = filter(\n lambda user: user.username == username,\n users\n )\n\n users_with_username_list = list(users_with_username)\n\n return users_with_username_list[0] if len(users_with_username_list) > 0 else None\n\n def create(self, user):\n \n if len(user.username) < 3:\n raise Exception(\n f\"User with username 
{user.username} is too short!\"\n )\n\n if len(user.password) < 3:\n raise Exception(\n \"Password must be at least 3 characters long\"\n )\n \n if not re.match(\"^[a-z]+$\", user.username):\n raise Exception(\n \"Username should contain only letters!\"\n )\n\n\n users = self.find_all()\n\n existing_user = self.find_by_username(user.username)\n\n if existing_user:\n raise Exception(\n f\"User with username {user.username} already exists\"\n )\n\n\n users.append(user)\n\n self._users = users\n\n return user\n\n def delete(self, user_id):\n users = self.find_all()\n\n users_without_id = filter(lambda user: user.id != user_id, users)\n\n self._users = list(users_without_id)\n\n def delete_all(self):\n self._users = []\n","repo_name":"lifeofborna/ohtu-s22-palautukset","sub_path":"viikko3/login-robot/src/repositories/user_repository.py","file_name":"user_repository.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"37546643969","text":"\"\"\"Chapter 4: Trees and Graphs. Question 4.2\"\"\"\n\n# Given a list of unique (sorted ascending) elements,\n# create a binary search tree.\n\nimport math\nimport json\n\n\ndata = [1, 2, 4, 5, 6, 7, 8, 10, 13, 15, 17, 20]\n\n\ndef safe_print(maybe_node):\n \"\"\"Safe print a maybe node.\"\"\"\n if maybe_node is None:\n return {}\n else:\n return maybe_node.as_dict()\n\n\nclass BTreeNode:\n \"\"\"Node in a binary search tree.\"\"\"\n\n def __init__(self, value):\n \"\"\"Initialization.\"\"\"\n self.value = value\n self.left = None\n self.right = None\n\n def as_dict(self):\n \"\"\"Return as dict.\"\"\"\n if self.value is None:\n return {}\n else:\n return {\n \"value\": self.value,\n \"left\": safe_print(self.left),\n \"right\": safe_print(self.right),\n }\n\n def __repr__(self):\n \"\"\"How to print yourself.\"\"\"\n return json.dumps(self.as_dict(), indent=2)\n\n\ndef get_node_val(list_of_elems):\n \"\"\"Find the middle or just right of middle node.\"\"\"\n length = len(list_of_elems)\n\n # If there is only one element\n if len(list_of_elems) == 1:\n return (list_of_elems[0], 0)\n\n # If there are only two elements\n # The leftmost element should be the child\n if len(list_of_elems) == 2:\n return (list_of_elems[1], 1)\n\n index = length // 2\n\n return (list_of_elems[index], index)\n\n\ndef make_tree(elems):\n \"\"\"Make a binary search tree.\"\"\"\n # Handle an empty list\n if not elems:\n return None\n\n # Get the median or right of median value\n (value, index) = get_node_val(elems)\n # Create a node from that value\n node = BTreeNode(value)\n # Now delete the value from the list\n del elems[index]\n\n node.left = make_tree([e for e in elems if e < value])\n node.right = make_tree([e for e in elems if e > value])\n\n return node\n\n\ndata_1 = [1]\ndata_2 = [1, 2]\ndata_3 = [1, 2, 3]\ndata_5 = list(range(1, 6))\ndata_7 = list(range(1, 8))\ndata_9 = list(range(1, 10))\n\n# print(make_tree(data_1))\n# print(make_tree(data_2))\n# print(make_tree(data_3))\n# print(make_tree(data_5))\n# print(make_tree(data_7))\n# print(make_tree(data_9))\nprint(make_tree(data))\n","repo_name":"NewMountain/algo_practice","sub_path":"chapter_4/4_2.py","file_name":"4_2.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28928621030","text":"import numpy as np\nimport torch\n\n\n
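# --- Editor's sketch (added between records): a quick property check for the
# median-split construction in 4_2.py above. An in-order traversal of a
# correctly built BST must reproduce the sorted input; this helper is
# illustrative and assumes 4_2.py's BTreeNode/make_tree are in scope.
def inorder(node, out=None):
    if out is None:
        out = []
    if node is not None:
        inorder(node.left, out)
        out.append(node.value)
        inorder(node.right, out)
    return out

# e.g.: data = [1, 2, 4, 5, 6, 7, 8, 10, 13, 15, 17, 20]
# make_tree mutates its argument, so hand it a copy:
# assert inorder(make_tree(list(data))) == sorted(data)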
class OrnsteinUhlenbeckActionNoise:\n \"\"\"\n Ornstein–Uhlenbeck noise\n Reference: https://zhuanlan.zhihu.com/p/96720878\n Compared with independent (Gaussian) noise, OU noise suits inertial systems, especially when the time-discretization step is small;\n when the step is not small (e.g. 0.1 s), independent noise also works quite well.\n mu, theta and sigma are the OU-noise parameters, and all of them are positive.\n mu is the mean on which the noise is applied; the mu passed into the class must be an action-shaped vector, e.g. np.zeros(action_dim).\n sigma is the Wiener-process parameter and determines how much the noise is amplified.\n The larger theta is, the faster the state is pulled back towards the mean, i.e. the shorter the mean-reversion time.\n \"\"\"\n\n def __init__(self, mu, theta=0.15, max_sigma=0.3, min_sigma=0.1, dt=1e-2, x0=None, decay_period=100000):\n self.x_prev = None # the raw noise state (not the action; the OU-processed action is the original action plus this value)\n self.mu = mu # OU-noise parameter\n self.theta = theta # OU-noise parameter\n self.sigma = max_sigma # OU-noise parameter\n self.dt = dt\n self.x0 = x0\n self.reset()\n\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.decay_period = decay_period\n\n # Call the instance directly to get the noise; add it to the action, then clip the action with np.clip() to limit its range\n def __call__(self, t=0):\n self.sigma = self.max_sigma - (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period) # sigma decays gradually\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(\n self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n # def sample(self):\n # dx = self.theta * (self.mu - self.X)\n # dx = dx + self.sigma * np.random.randn(self.n_actions)\n # self.X = self.X + dx\n # return self.X\n #\n # def get_action(self, action, t=0):\n # ou_x = self.sample() # the noise-processed value\n # self.sigma = self.max_sigma -\n # (self.max_sigma - self.min_sigma) * min(1.0, t / self.decay_period) # sigma decays gradually\n # return np.clip(action + ou_x, self.low, self.high) # clip the action after adding the noise\n\n\nif __name__ == '__main__':\n action = np.array([.25, .1, .7])\n n_action = action.shape[0]\n ou_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(n_action))\n print(ou_noise())\n ou_noise.reset()\n for i in range(100):\n print(ou_noise(i))\n # action = action + ou_noise(t=100) # add noise to the action\n # print(action)\n # action = np.clip(action, -.5, .5) # clip\n # print(action[0], action[1], action[2])\n","repo_name":"WZTMT/single_uav_ddpg","sub_path":"model/ou_noise.py","file_name":"ou_noise.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"42737865151","text":"import numpy as np\r\n\r\n\r\n# --- Hyperparameters\r\nALPHA_BACKTRACKING = 0.25\r\nBETA_BACKTRACKING = 0.5\r\nSTABILITY_EPS = 1e-6\r\n\r\n# GD\r\nCONDITION_NUM_THRESHOLD = 2e9\r\nMAX_STEP_GD = 1000\r\nEPSILON_GD_STOP = 7 * 1e-2\r\n\r\n# Newton\r\nEPSILON_QUASY_NEWTON = 0.000001\r\nMAX_STEP_NEWTON = 10\r\nEPSILON_NEWTON_STOP = 1e-4\r\n\r\n\r\n# --- Helper functions\r\ndef euclidean_proj_simplex(x):\r\n \"\"\"\r\n Projection to the unit simplex.\r\n :param x: The vector to project.\r\n :return: The projection of x onto the unit simplex.\r\n \"\"\"\r\n # check if we are already on the simplex\r\n if x.sum() == 1 and np.alltrue(x >= 0):\r\n # best projection: itself!\r\n return x\r\n # get the array of cumulative sums of a sorted (decreasing) copy of x\r\n u = np.sort(x)[::-1]\r\n cssu = np.cumsum(u)\r\n # get the number of > 0 components of the optimal solution\r\n K = np.nonzero(u * np.arange(1, x.shape[0] + 1) > (cssu - 1))[0][-1]\r\n # compute the Lagrange multiplier associated to the simplex constraint\r\n theta = (cssu[K] - 1) / (K + 1)\r\n # compute the projection by thresholding x using theta\r\n w = np.clip(a=(x - theta), a_min=0, a_max=None)\r\n return w\r\n\r\n\r\ndef relaxation_solver(hessian, grad_const):\r\n \"\"\"\r\n This function provides the closed form
solution for the problem\r\n with the sum 1 constraint only.\r\n :param hessian: The hessian of the least square problem - H.T @ H\r\n :param grad_const: The constant of the gradient of the least square problem - H.T @ y\r\n :return: weights vector as solution\r\n \"\"\"\r\n c_vec = np.ones(hessian.shape[0])\r\n upper_block = np.column_stack((hessian, -c_vec))\r\n lower_block = np.hstack((c_vec.T, 0))\r\n kkt = np.vstack((upper_block, lower_block))\r\n kkt_y = np.hstack((grad_const, 1))\r\n\r\n try:\r\n beta = np.linalg.solve(kkt, kkt_y)\r\n except np.linalg.LinAlgError: # handling singular kkt matrix\r\n eps_mat = np.eye(kkt.shape[0]) * STABILITY_EPS\r\n kkt = kkt + eps_mat\r\n beta = np.linalg.solve(kkt, kkt_y)\r\n\r\n return beta[:-1]\r\n\r\n\r\ndef backtracking_line_search_for_GD(f, x_i, grad, alpha=ALPHA_BACKTRACKING, beta=BETA_BACKTRACKING):\r\n \"\"\"\r\n Back-tracking method for gradient descent algorithm\r\n :param f: the objective function\r\n :param x_i: the current x\r\n :param grad: function mapping points to gradients\r\n :param alpha: a hyper-parameter\r\n :param beta: a hyper-parameter\r\n :return: the optimal step size\r\n \"\"\"\r\n t = 1\r\n norm_grad_f = grad.dot(grad)\r\n while f(x_i - t * grad) > f(x_i) - alpha * t * norm_grad_f:\r\n t *= beta\r\n return t\r\n\r\n\r\ndef backtracking_line_search_for_Newton(f, x_i, lam_sqr, hessian_inv_dot_grad, alpha=ALPHA_BACKTRACKING,\r\n beta=BETA_BACKTRACKING):\r\n \"\"\"\r\n Back-tracking method for gradient descent algorithm\r\n :param f: the objective function\r\n :param x_i: the current x\r\n :param lam_sqr: hessian squared * grad\r\n :param hessian_inv_dot_grad:\r\n :param alpha: a hyper-parameter\r\n :param beta: a hyper-parameter\r\n :return: the optimal step size\r\n \"\"\"\r\n t = 1\r\n while f(x_i - t * hessian_inv_dot_grad) > f(x_i) - alpha * t * lam_sqr:\r\n t *= beta\r\n return t\r\n\r\n\r\ndef original_objective(H, y):\r\n \"\"\"Least squares objective.\"\"\"\r\n return lambda x: np.linalg.norm(H.dot(x) - y)\r\n\r\n\r\ndef least_squares(H, y):\r\n \"\"\"Least squares objective.\"\"\"\r\n return lambda x: 0.5 * np.linalg.norm(H.dot(x) - y) ** 2\r\n\r\n\r\ndef least_squares_gradient(hessian, grad_const):\r\n \"\"\"Gradient of least squares objective at x.\"\"\"\r\n return lambda x: hessian.dot(x) - grad_const\r\n\r\n\r\ndef least_squares_hessian(H):\r\n \"\"\"Hessian of least squares objective.\"\"\"\r\n return H.T.dot(H)\r\n\r\n\r\ndef is_valid(x):\r\n \"\"\" For sanity check \"\"\"\r\n return np.isclose(np.sum(x), 1.) 
and np.alltrue(x >= 0)\r\n\r\n\r\n# --- Solver\r\ndef gradient_descent(init: np.ndarray, H: np.ndarray, y: np.ndarray, hessian: np.ndarray, grad_const: np.ndarray,\r\n steps: int = MAX_STEP_GD, epsilon: int = EPSILON_GD_STOP):\r\n \"\"\"\r\n :param init: initial weights for newton process\r\n :param H: the design matrix of the problem\r\n :param: y: the response vector of the problem\r\n :param hessian: The hessian of the least square problem - H.T @ H\r\n :param grad_const: The constant of the gradient of the least square problem - H.T @ y\r\n :param steps: the maximum steps for the algorithm\r\n :return: the best solution done by the algorithm\r\n \"\"\"\r\n f = least_squares(H, y)\r\n original_f = original_objective(H, y)\r\n grad = least_squares_gradient(hessian, grad_const)\r\n\r\n prev_f_x = 0\r\n x_i = init\r\n for i in range(steps):\r\n f_x_i = original_f(x_i)\r\n if abs(prev_f_x - f_x_i) < epsilon:\r\n break\r\n grad_x_i = grad(x_i)\r\n t = backtracking_line_search_for_GD(f, x_i, grad_x_i)\r\n x_i = euclidean_proj_simplex(x_i - t * grad_x_i)\r\n prev_f_x = f_x_i\r\n return x_i\r\n\r\n\r\ndef newton_projected(init: np.ndarray, H: np.ndarray, y: np.ndarray, hessian: np.ndarray, grad_const: np.ndarray,\r\n steps: int = MAX_STEP_NEWTON,\r\n epsilon: int = EPSILON_NEWTON_STOP):\r\n \"\"\"\r\n Newton method solver\r\n :param init: initial weights for newton process\r\n :param H: the design matrix of the problem\r\n :param: y: the response vector of the problem\r\n :param hessian: The hessian of the least square problem - H.T @ H\r\n :param grad_const: The constant of the gradient of the least square problem - H.T @ y\r\n :return: the best solution done by the algorithm\r\n \"\"\"\r\n f = least_squares(H, y)\r\n original_f = original_objective(H, y)\r\n grad = least_squares_gradient(hessian, grad_const)\r\n\r\n hessian_sqrt = np.linalg.cholesky(hessian + EPSILON_QUASY_NEWTON * np.eye(hessian.shape[0]))\r\n prev_f_x = 0\r\n x_i = init\r\n xs = [init]\r\n for i in range(steps):\r\n f_x_i = original_f(x_i)\r\n if abs(prev_f_x - f_x_i) < epsilon:\r\n break\r\n lam = np.linalg.solve(hessian_sqrt, grad(x_i))\r\n hessian_inv_dot_grad = np.linalg.solve(hessian_sqrt.T, lam)\r\n t = backtracking_line_search_for_Newton(f, xs[-1], lam.dot(lam), hessian_inv_dot_grad)\r\n x_i = euclidean_proj_simplex(xs[-1] - t * hessian_inv_dot_grad)\r\n xs.append(x_i)\r\n prev_f_x = f_x_i\r\n return min(xs, key=f)\r\n\r\n\r\ndef solve(H: np.ndarray, y: np.ndarray) -> np.ndarray:\r\n # preprocessing: calculate expensive matrix multiplications\r\n hessian = H.T.dot(H)\r\n grad_const = H.T.dot(y)\r\n\r\n # Step 1: try the closed solution\r\n x_0 = relaxation_solver(hessian, grad_const)\r\n\r\n # Checks if closed solution hold positive constraint\r\n if np.alltrue(x_0 >= 0):\r\n return x_0\r\n\r\n # Step 2: Gradient Descent Continue\r\n condition_number = 0\r\n if H.shape[0] <= 2000: # For larger matrices the computation becomes very expensive\r\n eig = np.linalg.eigh(hessian)[0]\r\n condition_number = eig[-1] / eig[0] if not np.isclose(eig[0], 0) else np.inf\r\n\r\n # if the first condition holds then the condition number is infinite\r\n if H.shape[0] > H.shape[1] and condition_number <= CONDITION_NUM_THRESHOLD:\r\n x_0 = gradient_descent(x_0, H, y, hessian, grad_const)\r\n\r\n # Step 3: Newton method\r\n x_0 = newton_projected(euclidean_proj_simplex(x_0), H, y, hessian, grad_const)\r\n return 
x_0","repo_name":"nettashafir/constrainted-least-squares-solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"73448856720","text":"import os\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression as lr\nfrom scipy.spatial.distance import cosine\nimport json\n\nflags = tf.flags\n\nflags.DEFINE_string ('data_dir', 'data/demo', 'data directory, to compute vocab')\nflags.DEFINE_string ('output_dir', 'output', 'output directory, to store summaries')\nflags.DEFINE_string ('nn_score_path', 'cv/score', 'a json file storing sentence scores computed with neural model')\nflags.DEFINE_boolean('symbolic', True, 'use symbolic features, e.g., sentence position, length')\nflags.DEFINE_boolean('distributional', True, 'use distributional features, e.g., sentence position, length')\nflags.DEFINE_string ('embedding_path', 'data', 'emebdding path, which must be specified if distributional=True')\nflags.DEFINE_integer('embedding_dim', 50, 'emebdding size')\n\nFLAGS = flags.FLAGS\n\ndef load_wordvec(embedding_path):\n '''load word vectors'''\n\n print ('loading word vectors')\n word_vec = {}\n with open(embedding_path, \"r\") as f:\n for line in f:\n line = line.rstrip().split(' ')\n word_vec[line[0]] = np.asarray([float(x) for x in line[1:]])\n print ('loading completed')\n\n return word_vec\n\n\ndef load_nn_score(nn_score_path):\n '''load the output scores predicted by an NN model\n this is a json file, which maps file name to a list of sentence scores'''\n scores = {}\n with open(nn_score_dir, 'r') as f:\n for line in f:\n line = json.loads(line)\n for key, val in line.iteritems():\n scores[key] = val\n\n return scores\n\n\ndef normalize(lx):\n '''normalize feature vectors in a small subset'''\n nsamples, nfeatures = len(lx), len(lx[0])\n for i in range(nfeatures):\n column = []\n for j in range(nsamples):\n column.append(lx[j][i])\n total = sum(column)\n for j in range(nsamples):\n if total!=0: lx[j][i] = lx[j][i] / total\n return lx\n\n\nclass Sybolic_Extractor(object):\n '''extract symbolic features: sentence length, position, entity counts\n We normalize all features.'''\n\n def __init__(self, etype='symbolic'):\n self.etype = etype\n \n @staticmethod \n def length(sen):\n return len(sen)\n\n @staticmethod \n def ent_count(sen):\n return sen.count('entity') \n\n def extract_feature(self, sen_list):\n features = []\n for sid, sen in enumerate(sen_list):\n sen_feature = [sid, self.length(sen), self.ent_count(sen)]\n features.append(sen_feature) \n\n return features\n\n\nclass Distributional_Extractor(object):\n '''extract distributional features: \n sentence similary with respect to document\n sentence similary with respect to other sentences\n We normalize all features.'''\n\n def __init__(self, etype='distributional'):\n self.etype = etype\n\n @staticmethod \n def compute_sen_vec(sen, word_vec):\n sen_vec = np.zeros(FLAGS.embedding_dim)\n count = 0\n for word in sen.split(' '):\n if word_vec.has_key(word):\n sen_vec += word_vec[word]\n count += 1\n if count > 0:\n sen_vec = sen_vec / count\n \n return sen_vec\n\n @staticmethod \n def reduncy(sen_vec, doc_vec):\n return 1 - cosine(sen_vec, (doc_vec - sen_vec))\n\n @staticmethod \n def relavence(sen_vec, doc_vec): \n return 1 - cosine(sen_vec, doc_vec)\n\n def extract_feature(self, sen_list, word_vec):\n features = []\n sen_vec_list = []\n for sen in sen_list:\n 
sen_vec_list.append(self.compute_sen_vec(sen, word_vec))\n\n doc_vec = sum(sen_vec_list) \n\n for sen_vec in sen_vec_list:\n sen_feature = [self.reduncy(sen_vec, doc_vec), self.relavence(sen_vec, doc_vec)]\n features.append(sen_feature)\n\n return features\n\n\ndef train_and_test():\n '''train and test a logistic regression classifier, which uses other features'''\n\n sExtractor = Sybolic_Extractor()\n dExtractor = Distributional_Extractor()\n\n word_vec = load_wordvec(FLAGS.embedding_path)\n\n nn_scores = load_nn_score(FLAGS.nn_score_path)\n\n train_x, train_y = [], []\n\n train_dir = os.path.join(FLAGS.data_dir, 'train')\n train_files = os.listdir(train_dir)\n\n for input_file in train_files:\n input_dir = os.path.join(train_dir, input_file)\n fp = open(input_dir, 'r')\n lines = fp.read().split('\\n\\n')\n sentences = lines[1].split('\\n')\n sens = [sen.split('\\t\\t\\t')[0] for sen in sentences]\n y = [int(sen.split('\\t\\t\\t')[1]) for sen in sentences] \n\n x_n = nn_scores[input_file]\n x_s = sExtractor.extract_feature(sens)\n x_d = dExtractor.extract_feature(sens, word_vec)\n x = [[f1] + f2 + f3 for f1, f2, f3 in zip(x_n, x_s, x_d)] \n x = normalize(x)\n\n train_x.extend(x)\n train_y.extend(y)\n\n fp.close()\n\n train_x = np.asarray(train_x)\n train_y = np.asarray(train_y)\n\n my_lr = lr()\n my_lr.fit(train_x, train_y)\n\n print ('testing...')\n\n test_dir = os.path.join(FLAGS.data_dir, 'test')\n test_files = os.listdir(test_dir)\n\n for input_file in test_files:\n input_dir = os.path.join(test_dir, input_file)\n fp = open(input_dir, 'r')\n lines = fp.read().split('\\n\\n')\n sentences = lines[1].split('\\n')\n sens = [sen.split('\\t\\t\\t')[0] for sen in sentences]\n\n x_n = nn_scores[input_file]\n x_s = sExtractor.extract_feature(sens)\n x_d = dExtractor.extract_feature(sens, word_vec)\n test_x = [[f1] + f2 + f3 for f1, f2, f3 in zip(x_n, x_s, x_d)] \n test_x = normalize(test_x)\n\n fp.close()\n\n score = my_lr.predict_proba(np.asarray(test_x))\n # we need score for the postive classes\n sen_score = {}\n for sid, sentence in enumerate(sens):\n sen_score[sentence] = score[sid][1] + 0.5 * score[sid][2]\n\n sorted_sen = sorted(sen_score.items(), key=lambda d: d[1], reverse=True) \n selected = [s[0] for s in sorted_sen[:3]]\n\n # store selected sentences to output file, following the original order\n file_name = '.'.join(input_file.split('.')[:-1]) + '.output'\n\n output_fp = open(os.path.join(FLAGS.output_dir, file_name), 'w')\n for sen in sens:\n if sen in selected:\n output_fp.write(sen + '\\n')\n output_fp.close()\n\n\nif __name__ == \"__main__\":\n train_and_test()\n\n","repo_name":"kata-ai/indosum","sub_path":"neuralsum/ranking/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"3"} +{"seq_id":"75322028562","text":"import secrets, os\nfrom app_store import admin, login_manager, db, bcrypt\nfrom app_store.models import User, Item, Order\nfrom flask import Blueprint, render_template, redirect, url_for, flash\nfrom app_store.admin.forms import AdminRegisterForm, ItemForm\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom app_store.admin.utils import save_picture1, save_picture2, save_picture3\n\nadmin = Blueprint('admin', __name__)\n\n\n@admin.route('/dashboard')\n@login_required\ndef dashboard():\n if current_user.is_admin:\n return render_template('admin/dashboard.html', title = 'Admin')\n return redirect(url_for('errors.404')) 
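# --- Editor's sketch (added): the is_admin guard repeated in dashboard()
# above and create_item() below could be factored into a decorator. This is a
# hypothetical refactoring, not code from the app; it keeps the app's own
# (unusual) redirect to the 'errors.404' endpoint.
from functools import wraps
from flask import redirect, url_for
from flask_login import current_user

def admin_required(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        if not current_user.is_admin:
            return redirect(url_for('errors.404'))
        return view(*args, **kwargs)
    return wrapped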
\n\n@admin.route('/admin_register', methods=['GET','POST'])\ndef admin_register():\n    if current_user.is_authenticated:\n        return redirect(url_for('shop.index'))\n    form = AdminRegisterForm()\n    if form.validate_on_submit():\n        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n        user = User(username= form.username.data, email=form.email.data, password = hashed_password, is_admin = True)\n        db.session.add(user)\n        db.session.commit()\n        flash(f'Registered Successfully','success')\n        return redirect(url_for('user.login'))\n    return render_template('admin/admin_register.html', form=form, title='Admin-Registration')\n\n\n@admin.route('/create_item', methods=['GET','POST'])\n@login_required\ndef create_item():\n    if current_user.is_admin:\n        form = ItemForm()\n        if form.validate_on_submit():\n            image_file1 = save_picture1(form.item_pic1.data)\n            image_file2 = save_picture2(form.item_pic2.data)\n            image_file3 = save_picture3(form.item_pic3.data)\n            item = Item(item_pic1 = image_file1, item_pic2 = image_file2, item_pic3 = image_file3, name = form.name.data, description = form.description.data, price = form.price.data )\n            db.session.add(item)\n            db.session.commit()\n            flash(f'Item, {form.name.data} successfully added to database','success')\n            return redirect(url_for('admin.create_item'))\n        return render_template('admin/create_item.html', title='Add to Database', form=form)\n    return redirect(url_for('errors.404'))\n","repo_name":"alexmagwe/sound-store","sub_path":"app_store/admin/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36743587505","text":"import sys\nsys.path.insert(0, r'D:\\GitHub\\questionnaire_gan\\detection_gan')\nfrom cnn_training_loop import training_loop\nfrom generator_discriminator import CNN_Generator, CNN_Discriminator, Generator\nfrom transformers import get_linear_schedule_with_warmup\nimport wandb\nimport torch\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n\nlayer_sizes = [100, 84000]\n# layer_sizes = [100, 2000, 4000]\n\ngenerator = Generator(layer_sizes, 40)\n\n# with mean 0 and standard deviation 0.02\n# generator.linear_gen_0.weight.data.normal_(0, 1)\n# generator.linear_gen_0.bias.data.fill_(0)\n# generator.linear_gen_1.weight.data.normal_(0, 1)\n# generator.linear_gen_1.bias.data.fill_(0)\n\n\n\ndiscriminator = CNN_Discriminator()\n\ngenerator = generator.to(device)\ndiscriminator = discriminator.to(device)\n\noptimizer_generator = torch.optim.AdamW(generator.parameters(),\n                                        lr=5e-5,\n                                        eps=1e-8, # Epsilon\n                                        weight_decay=0.3,\n                                        amsgrad=True,\n                                        betas = (0.9, 0.999))\n\n\noptimizer_discriminator = torch.optim.AdamW(discriminator.parameters(),\n                                            lr=5e-5,\n                                            eps=1e-8, # Epsilon\n                                            weight_decay=0.3,\n                                            amsgrad=True,\n                                            betas = (0.9, 0.999))\n\n# modified minimax loss\n\ncriterion = torch.nn.BCELoss()\ncritic_range = 1\nmimic_range = 1\nsave_dir = r'D:\\GitHub\\questionnaire_gan\\detection_gan\\models\\test_cnn_2'\nepochs = 160\nbatch_size = 1\n\n# load\n# generator.load_state_dict(torch.load(r'D:\\GitHub\\questionnaire_gan\\detection_gan\\models\\test_cnn_1generator'))\n# discriminator.load_state_dict(torch.load(r'D:\\GitHub\\questionnaire_gan\\detection_gan\\models\\test_cnn_2discriminator'))\n\nscheduler_generator = get_linear_schedule_with_warmup(optimizer_generator,\n                                                      num_warmup_steps = 8000,\n                                                      num_training_steps= (4000 / batch_size) * 
epochs)\n\nscheduler_discriminator = get_linear_schedule_with_warmup(optimizer_discriminator,\n num_warmup_steps = 8000,\n num_training_steps= (4000 / batch_size) * epochs)\n\nwandb.init(project=\"detection_gan\", entity=\"hubertp\")\nwandb.watch(generator, log_freq=5)\nwandb.watch(discriminator, log_freq=5)\n\ntraining_loop(generator, discriminator, epochs, batch_size, device, save_dir,\n optimizer_generator, optimizer_discriminator, criterion,\n scheduler_generator, scheduler_discriminator, critic_range,\n mimic_range)\n\n\n","repo_name":"hplisiecki/questionnaire_gan","sub_path":"detection_gan/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72223593043","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0006_post_time_published'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='time_created',\n ),\n migrations.AlterField(\n model_name='post',\n name='time_published',\n field=models.DateTimeField(),\n preserve_default=True,\n ),\n ]\n","repo_name":"katur/blogsite","sub_path":"blog/migrations/0007_auto_20141204_2203.py","file_name":"0007_auto_20141204_2203.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"22762387903","text":"from PyQt5 import QtWidgets, QtCore\r\n#from collections import deque\r\nimport pyqtgraph\r\nimport sys\r\nimport serial\r\nimport glob\r\n#import math\r\n#import time\r\nimport numpy as np\r\nimport queue\r\n#import types\r\nimport re\r\n\r\nimport serplot_ui\r\n#from matplotlib.backends.qt_compat import QtWidgets\r\n\r\n\r\nsettings = {'port': '',\r\n 'baudrate': '115200',\r\n 'n_plots': '3',\r\n 'n_columns': '1',\r\n 'sep': ',',\r\n 'end': '$',\r\n 'xFactor': '1',\r\n 'bufferThr': '0',\r\n 'delaySerial': '3',\r\n 'delayQueue': '3',\r\n 'updateInterval' : '30',\r\n 'serialTimeout' : '300',\r\n 'queueMaxLen': '1000',\r\n 'mode': 'normal'}\r\n\r\n\r\n\r\ndef serial_ports():\r\n if sys.platform.startswith('win'):\r\n ports = ['COM%s' % (i + 1) for i in range(256)]\r\n elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):\r\n ports = glob.glob('/dev/tty[A-Za-z]*')\r\n ports += glob.glob('/dev/tnt*')\r\n elif sys.platform.startswith('darwin'):\r\n ports = glob.glob('/dev/tty.*')\r\n else:\r\n raise EnvironmentError('Unsupported platform')\r\n result = []\r\n for port in ports:\r\n try:\r\n s = serial.Serial(port)\r\n s.close()\r\n result.append(port)\r\n except (OSError, serial.serialutil.SerialException):\r\n pass\r\n return result \r\n \r\n\r\n\r\nclass MainView(QtWidgets.QMainWindow, serplot_ui.Ui_MainWindow):\r\n \r\n sigStopSerialThread = QtCore.pyqtSignal()\r\n sigStopQueueThread = QtCore.pyqtSignal()\r\n\r\n \r\n def __init__(self):\r\n super(self.__class__, self).__init__()\r\n self.setupUi(self)\r\n\r\n pyqtgraph.setConfigOptions(antialias=True)\r\n self.readSettings()\r\n self.plots = []\r\n self.noData = False\r\n \r\n self.timerUpdateGraph = QtCore.QTimer()\r\n self.timerUpdateGraph.setSingleShot(False)\r\n self.timerUpdateGraph.timeout.connect(self.updatePlots)\r\n self.timerSerialTimeout = QtCore.QTimer()\r\n self.timerSerialTimeout.setSingleShot(True)\r\n self.timerSerialTimeout.timeout.connect(self.showNoData)\r\n \r\n 
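# raw bytes from the serial port are buffered here until the queue-reader thread parses them\r\n        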
self.streamBuffer = queue.Queue()\r\n\r\n        self.dataProcessing = False\r\n        \r\n        self.btnApply.clicked.connect(self.startDataProcessing)\r\n        self.btn_clearQueue.clicked.connect(self.clearImgQueue)\r\n        self.btnSaveData.clicked.connect(self.saveData)\r\n        self.comboBoxChooseSerial.showPopup = self.updatePortsBeforePopup\r\n        \r\n        \r\n    def updatePortsBeforePopup(self):\r\n        self.updateSerialPorts()\r\n        QtWidgets.QComboBox.showPopup(self.comboBoxChooseSerial)    \r\n        \r\n        \r\n    def updateSerialPorts(self):\r\n        self.comboBoxChooseSerial.clear()\r\n        ports = serial_ports()\r\n        for port in ports:\r\n            self.comboBoxChooseSerial.addItem(port)\r\n        \r\n        \r\n    def saveData(self):\r\n        fname = QtWidgets.QFileDialog.getSaveFileName(\r\n                self,\"QFileDialog.getSaveFileName()\",\r\n                \"\",\"All Files (*);;Text Files (*.txt)\")\r\n        if fname[0]:\r\n            with open(fname[0], 'w') as f:\r\n                for data in self.bkp:\r\n                    f.write(re.sub('[\\[\\] ]', '', str(data)) + '\\n\\r')\r\n        \r\n        \r\n    def readSettings(self):\r\n        self.updateSerialPorts()\r\n        idx = self.comboBoxChooseSerial.findText(settings['port'])\r\n        if idx >= 0:\r\n            self.comboBoxChooseSerial.setCurrentIndex(idx)\r\n        idx = self.comboBoxBaudrate.findText(settings['baudrate'])\r\n        if idx >= 0:\r\n            self.comboBoxBaudrate.setCurrentIndex(idx)\r\n        idx = self.comboBoxNPlots.findText(settings['n_plots'])\r\n        if idx >= 0:\r\n            self.comboBoxNPlots.setCurrentIndex(idx)    \r\n        idx = self.comboBoxNColumns.findText(settings['n_columns'])\r\n        if idx >= 0:\r\n            self.comboBoxNColumns.setCurrentIndex(idx)    \r\n        self.lineEditSeparator.setText(settings['sep'])\r\n        self.lineEditTerminator.setText(settings['end'])\r\n        self.lineEditXFactor.setText(settings['xFactor'])\r\n        self.lineEditSerialDelay.setText(settings['delaySerial'])\r\n        self.lineEditDelayQueue.setText(settings['delayQueue'])\r\n        self.lineEditUpdate.setText(settings['updateInterval'])\r\n        self.lineEditQueueMaxLenght.setText(settings['queueMaxLen'])\r\n        \r\n        \r\n    def startDataProcessing(self):\r\n        if self.dataProcessing:\r\n            self.stopDataProcessing()\r\n        self.writeSettings()\r\n        self.setupPlots()\r\n        self.imgQueue = queue.Queue(maxsize=int(settings['queueMaxLen']))\r\n        self.bkp = []\r\n        self.dataProcessing = True\r\n        self.readSerialBuffer()\r\n        self.readQueue()\r\n        self.startPlotting()\r\n        self.updateView()\r\n        \r\n        \r\n    def writeSettings(self):\r\n        settings['port'] = self.comboBoxChooseSerial.currentText()\r\n        settings['baudrate'] = self.comboBoxBaudrate.currentText()\r\n        settings['n_plots'] = self.comboBoxNPlots.currentText()\r\n        settings['n_columns'] = self.comboBoxNColumns.currentText()\r\n        settings['sep'] = self.lineEditSeparator.text()\r\n        settings['end'] = self.lineEditTerminator.text()\r\n        settings['xFactor'] = float(self.lineEditXFactor.text())\r\n        settings['delaySerial'] = int(self.lineEditSerialDelay.text())\r\n        settings['delayQueue'] = int(self.lineEditDelayQueue.text())    \r\n        settings['updateInterval'] = int(self.lineEditUpdate.text())\r\n        settings['queueMaxLen'] = int(self.lineEditQueueMaxLenght.text())\r\n        \r\n        \r\n    def updateView(self):\r\n        if self.dataProcessing:\r\n            self.groupBoxBuffers.setEnabled(True)\r\n        \r\n        \r\n    def setupPlots(self):\r\n        for p in self.plots:\r\n            self.pltLayout.removeItem(p)\r\n        self.plots = []\r\n        n_plots = int(settings['n_plots'])\r\n        n_columns = int(settings['n_columns'])\r\n        for i in range(n_plots):\r\n            row = i // n_columns\r\n            col = i % n_columns\r\n            p = self.pltLayout.addPlot(row=row, col=col, title=str(i))\r\n            p.showGrid(x=True, y=False)\r\n            self.plots.append(p)    \r\n        self.active_plot = 0\r\n        \r\n        \r\n    def 
showErrorMsg(self, msg):\r\n print('', msg)\r\n \r\n \r\n def showInfoMsg(self, msg):\r\n self.statusbar.showMessage(msg)\r\n \r\n \r\n def readQueue(self):\r\n self.queueReader = QueueReader(self)\r\n self.queueReader.sigDataReady.connect(self.updatePlots)\r\n self.readQueueThread = QtCore.QThread(self)\r\n self.queueReader.moveToThread(self.readQueueThread)\r\n self.readQueueThread.started.connect(self.queueReader.start)\r\n print('start queue thread')\r\n self.readQueueThread.start()\r\n \r\n \r\n def startPlotting(self):\r\n self.pause = False\r\n self.timerUpdateGraph.start(settings['updateInterval']//len(self.plots))\r\n self.btnSaveData.setEnabled(False)\r\n \r\n \r\n def stopPlotting(self):\r\n self.pause = True\r\n self.btn_run.clicked.connect(self.startPlotting)\r\n self.btn_run.setText('Run')\r\n self.btnSaveData.setEnabled(True)\r\n \r\n \r\n @QtCore.pyqtSlot()\r\n def updatePlots(self):\r\n self.showStreamBufferSize()\r\n self.lineEditImgBuffer.setText(str(self.imgQueue.qsize()))\r\n try:\r\n if not self.pause:\r\n self.y_data = self.imgQueue.get(block=False)\r\n curve = self.plots[self.active_plot].plot(pen='g', clear=True)\r\n x_data = [x * settings['xFactor'] for x, y in enumerate(self.y_data)]\r\n curve.setData(x_data, self.y_data)\r\n self.active_plot = (self.active_plot + 1) % int(settings['n_plots'])\r\n \r\n self.bkp.append(self.y_data)\r\n if len(self.bkp) > int(settings['n_plots']):\r\n self.bkp.pop(0)\r\n \r\n self.btn_run.clicked.connect(self.stopPlotting)\r\n self.btn_run.setEnabled(True)\r\n self.btn_run.setText('Pause') \r\n except queue.Empty:\r\n if not self.dataProcessing or self.noData:\r\n self.btn_run.setEnabled(False)\r\n self.btn_run.setText('Run')\r\n \r\n \r\n @QtCore.pyqtSlot() \r\n def showStreamBufferSize(self):\r\n self.lineEditStreamBuffer.setText(str(self.streamBuffer.qsize()))\r\n \r\n \r\n @QtCore.pyqtSlot(int)\r\n def showBufferLoad(self, n_byte):\r\n self.lineEditSerialBuffer.setText(str(n_byte))\r\n if n_byte:\r\n self.noData = False\r\n self.showInfoMsg('Receiving data at %s' %settings['port'])\r\n \r\n \r\n @QtCore.pyqtSlot()\r\n def showNoData(self):\r\n self.showInfoMsg('No data at %s' %settings['port'])\r\n self.noData = True\r\n \r\n \r\n @QtCore.pyqtSlot()\r\n def serialTimeout(self):\r\n if not self.timerSerialTimeout.isActive():\r\n self.timerSerialTimeout.start(int(settings['serialTimeout']))\r\n self.showBufferLoad(0)\r\n \r\n \r\n def readSerialBuffer(self):\r\n self.serialReader = SerialReader(self)\r\n self.serialReader.sigBufferLoad.connect(self.showBufferLoad)\r\n self.serialReader.sigNoData.connect(self.serialTimeout)\r\n self.serialReader.sigSerialError.connect(self.handleSerialError)\r\n self.serialThread = QtCore.QThread(self)\r\n self.serialReader.moveToThread(self.serialThread)\r\n self.serialThread.started.connect(self.serialReader.start)\r\n print('start serial thread')\r\n self.serialThread.start()\r\n \r\n \r\n def stopDataProcessing(self):\r\n print('stop queue thread')\r\n self.readQueueThread.quit()\r\n self.readQueueThread.wait()\r\n print('stop serial thread')\r\n self.serialThread.quit()\r\n self.serialThread.wait()\r\n self.streamBuffer.queue.clear()\r\n self.dataProcessing = False \r\n\r\n\r\n @QtCore.pyqtSlot()\r\n def handleSerialError(self):\r\n self.stopDataProcessing()\r\n self.statusbar.showMessage('Check serial port!!')\r\n self.comboBoxChooseSerial.clear()\r\n self.timerSerialTimeout.stop()\r\n \r\n \r\n def clearImgQueue(self):\r\n self.imgQueue.queue.clear()\r\n if self.noData:\r\n 
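# without fresh incoming data there is nothing left to save or re-run\r\n            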
self.btnSaveData.setEnabled(False)\r\n self.btn_run.setEnabled(False)\r\n \r\n\r\n\r\nclass QueueReader(QtCore.QObject):\r\n \r\n sigErrorMsg = QtCore.pyqtSignal(object)\r\n sigDataReady = QtCore.pyqtSignal(object)\r\n sigQueueCleared = QtCore.pyqtSignal()\r\n \r\n def __init__(self, parent):\r\n super().__init__()\r\n self.parent = parent\r\n self.s_values = []\r\n self.s_value = ''\r\n self.streamBuffer = parent.streamBuffer\r\n\r\n \r\n def readQueue(self):\r\n f_values = []\r\n s = ''\r\n try:\r\n b = self.streamBuffer.get(block=False)\r\n s = b.decode('utf-8')\r\n except UnicodeDecodeError:\r\n print('can\\'t decode bytes!!', b)\r\n except queue.Empty:\r\n pass\r\n \r\n for c in s:\r\n if c == settings['sep']:\r\n if self.s_value:\r\n self.s_values.append(self.s_value)\r\n self.s_value = ''\r\n elif c == settings['end']:\r\n try:\r\n f_values = [float(v) for v in self.s_values]\r\n except ValueError:\r\n print('ValueError!!', self.s_values)\r\n self.parent.imgQueue.put(f_values)\r\n self.s_values = []\r\n f_values = []\r\n else:\r\n self.s_value += c\r\n self.timer.start(settings['delayQueue'])\r\n \r\n \r\n def start(self):\r\n self.timer = QtCore.QTimer()\r\n self.timer.setSingleShot(True)\r\n self.timer.timeout.connect(self.readQueue)\r\n self.timer.start(settings['delayQueue'])\r\n \r\n\r\n\r\nclass SerialReader(QtCore.QObject):\r\n \r\n sigNoData = QtCore.pyqtSignal()\r\n sigBufferLoad = QtCore.pyqtSignal(int)\r\n sigSerialError = QtCore.pyqtSignal()\r\n \r\n \r\n def __init__(self, parent):\r\n super().__init__()\r\n self.parent = parent\r\n self.streamBuffer = parent.streamBuffer\r\n try:\r\n self.ser = serial.Serial(settings['port'], settings['baudrate'])\r\n except serial.serialutil.SerialException:\r\n self.sigSerialError.emit()\r\n \r\n \r\n def readSerialBuffer(self):\r\n try:\r\n n_bytes = self.ser.in_waiting\r\n if n_bytes:\r\n b = self.ser.read(n_bytes)\r\n self.streamBuffer.put(b)\r\n self.sigBufferLoad.emit(n_bytes)\r\n else:\r\n self.sigNoData.emit()\r\n except (serial.serialutil.SerialException, OSError):\r\n self.sigSerialError.emit()\r\n self.timer.start(settings['delaySerial'])\r\n \r\n \r\n def start(self):\r\n self.ser.flushInput()\r\n self.timer = QtCore.QTimer()\r\n self.timer.setSingleShot(True)\r\n self.timer.timeout.connect(self.readSerialBuffer)\r\n self.timer.start(settings['delaySerial'])\r\n\r\n\r\n\r\nif __name__ == '__main__': \r\n app = QtWidgets.QApplication(sys.argv) \r\n form = MainView() \r\n form.show() \r\n app.exec_() ","repo_name":"gagahan/serplot","sub_path":"serplot.py","file_name":"serplot.py","file_ext":"py","file_size_in_byte":13650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5046801027","text":"\"\"\"\nread outcomes of training/ models tunings\n\n@author: Tanya\n\"\"\"\n\n\n#usual imports\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\nfrom sklearn.utils import shuffle\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\nimport os\nimport matplotlib.pyplot as plt\nimport os.path\nimport csv\n\n\n\ndef load_results(file_path, model_name, num, lr):\n \"\"\"\n loads results from the given path to dataframe\n \"\"\"\n file_name = os.path.join(file_path, model_name + str(num) + str(lr) + \"_params.csv\") # parameters \n params_df = pd.read_csv(file_name) # read data as csv to a dataframe\n \n file_name = os.path.join(file_path, model_name + str(num) + str(lr) + 
\"_history.csv\") # train history \n history_df = pd.read_csv(file_name) # read data as csv to a dataframe\n\n return params_df, history_df\n\n\ndef load_params(params_df):\n \"\"\"\n read models parameters from a dataframe\n \"\"\"\n decay = float(params_df.loc[:,'decay'])\n lr = float(params_df.loc[:,'lr'])\n batch_size = int(params_df.loc[:,'batch_size'])\n kernel_size = int(params_df.loc[:,'kernel_size'])\n num_filters = int(params_df.loc[:,'num_filters'])\n #print(decay)\n \n return decay, lr, batch_size, kernel_size, num_filters\n\n \n#file_path = '/home/psollai/tanya/ctg/models_results/FCN_5_tuning/' \nfile_path = 'C:/Users/New/Documents/Datasets/results_300_ave80/'\nmodel_name = 'FCN10ave_24_'\n#model_name = 'FCN_RS_5_tune'\n\n# test load training results\nparams, history = load_results(file_path, model_name, 24, 0.0005)\nprint('params: ', params,' history: ', history)\ndecay, lr, batch_size, kernel_size, num_filters = load_params(params)\n\n\ndef plot_acc(file_path, model_name, num_filters, lr):\n \"\"\"\n plots accuracy for various models, returns fig\n \"\"\"\n fig = plt.figure(1, figsize=(9, 7))\n plt.ylabel('Accuracy', fontsize = 16)\n plt.xlabel('# epochs', fontsize = 16) \n plt.title('Accuracy, '+model_name, fontsize = 18)\n info = [] #list of strings\n colors = ['r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c', \\\n 'r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c']\n \n for num in num_filters:\n params, history = load_results(file_path, model_name, num, lr)\n color = colors[1]\n acc = history.loc[:,'acc']\n plt.plot(acc, color)\n color = colors[0]\n val_acc = history.loc[:,'val_acc']\n plt.plot(val_acc, color)\n #add a legend with parameters\n decay, learning_rate, batch_size, kernel_size, num_filters = load_params(params)\n info.append('decay: '+str(decay)+', batch: '+str(batch_size)+', kernel: '+ \\\n str(kernel_size)+', # filters:'+str(num_filters)) #append a line to a legend list \n plt.legend(info, loc='lower right', bbox_to_anchor=(1, 0.0))\n \n # plt.text(6, 0.75, 'learning rate: '+str(learning_rate), fontsize = 14)\n plt.grid() \n \n return fig\n\n\ndef plot_auc(file_path, model_name, num_filters, lr):\n \"\"\"\n plots accuracy for various models, returns fig\n \"\"\"\n fig = plt.figure(2, figsize=(9, 7))\n plt.ylabel('AUC', fontsize = 16)\n plt.xlabel('# epochs', fontsize = 16)\n plt.grid() \n plt.title('AUC, '+model_name, fontsize = 18)\n info = [] #list of strings\n colors = ['r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c', \\\n 'r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c']\n \n for num in num_filters:\n params, history = load_results(file_path, model_name, num, lr)\n color = colors[1]\n auc = history.loc[:,'auc']\n plt.plot(auc, color)\n color = colors[0]\n val_auc = history.loc[:,'val_auc']\n plt.plot(val_auc, color)\n #add a legend with parameters\n decay, learning_rate, batch_size, kernel_size, num_filters = load_params(params)\n info.append('decay: '+str(decay)+', batch: '+str(batch_size)+', kernel: '+ \\\n str(kernel_size)+', # filters:'+str(num_filters)) #append a line to a legend list \n plt.legend(info, loc='lower right', bbox_to_anchor=(1, 0.0))\n \n #plt.text(6, 0.75, 'learning rate: '+str(learning_rate), fontsize = 14)\n \n return fig\n\n\ndef plot_loss(file_path, model_name, num_filters, lr):\n \"\"\"\n plots accuracy for various models, returns fig\n \"\"\"\n fig = plt.figure(3, figsize=(9, 7))\n plt.ylabel('Loss (binary crossentrophy)', fontsize = 16)\n 
plt.xlabel('# epochs', fontsize = 16)\n    plt.grid() \n    plt.title('Loss, '+model_name, fontsize = 18)\n    info = [] #list of strings\n    colors = ['r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c', \\\n              'r', 'b', 'g', 'm', 'k', 'c', '--r', '--b', '--g', '--m', '--k','--c']\n    \n    for num in num_filters:\n        params, history = load_results(file_path, model_name, num, lr)\n        color = colors[1]\n        loss = history.loc[:,'loss']\n        plt.plot(loss, color)\n        color = colors[0]\n        val_loss = history.loc[:,'val_loss']\n        plt.plot(val_loss, color)\n        #add a legend with parameters; unpack into n_filters to avoid\n        #clobbering the num_filters loop argument\n        decay, learning_rate, batch_size, kernel_size, n_filters = load_params(params)\n        info.append('decay: '+str(decay)+', batch: '+str(batch_size)+', kernel: '+ \\\n                    str(kernel_size)+', # filters:'+str(n_filters)) #append a line to a legend list \n    plt.legend(info, loc='lower right', bbox_to_anchor=(1, 0.0))\n    \n    plt.text(6, 0.75, 'learning rate: '+str(learning_rate), fontsize = 14)\n    \n    return fig\n\n\n# test plot results for different model parameters \nlr = 0.0005\nnum = [24]\nplot_models_results = True\n\nif plot_models_results:\n    #test fig_acc \n    fig_acc = plot_acc(file_path, model_name, num, lr) \n    fig_acc.savefig(file_path + str(model_name) + str(num) + '_acc.png')\n    \n    fig_auc = plot_auc(file_path, model_name, num, lr) \n    fig_auc.savefig(file_path + str(model_name) + str(num) + '_auc.png')\n    \n    fig_loss = plot_loss(file_path, model_name, num, lr) \n    fig_loss.savefig(file_path + str(model_name) + str(num) + '_loss.png')\n    \n\n    \n    \n\n","repo_name":"tatigabru/transforms","sub_path":"src/helpers/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"40586413984","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 22 09:19:45 2021\n\n@author: 600037209\n\"\"\"\n\ntry:\n    x = int(input(\"Enter x value:\"))\n    y = int(input(\"Enter y value:\"))\n    \n    division = x/y\n\n    print(division)\n\nexcept Exception as e:\n    print(e)\nelse:\n    print('This is the else block')\nfinally:\n    print('This is the final block')\n    \n\ntry:\n    x = int(input(\"Enter x value:\"))\n    y = int(input(\"Enter y value:\"))\n    \n    division = x/y\n\n    print(division)\n\nexcept:\n    print('There is some issue in the logic')\n","repo_name":"Hemanthkaruturi/Data-Science-with-Python-tutorials","sub_path":"6. excption1.py","file_name":"6. 
excption1.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22188476331","text":"#Query String Parameters\n\nimport requests\n\nurl = 'https://api.github.com/search/repositories'\n# Search GitHub's repositories for requests\nresponse = requests.get(url, params={'q': 'requests+language:python'},\n)\n\n# Inspect some attributes of the `requests` repository\njson_response = response.json()\nrepository = json_response['items'][0]\nprint(f'Repository name: {repository[\"name\"]}') # Python 3.6+\nprint(f'Repository description: {repository[\"description\"]}')\n\n\n#You can pass params to get() in the form of a dictionary,\n# as you have just done, or as a list of tuples:\n\nrequests.get(url, params=[('q', 'requests+language:python')],)\n\n#You can even pass the values as bytes:\n\nrequests.get( url, params=b'q=requests+language:python',)\n\n","repo_name":"iuyt9003/pythonexamples","sub_path":"examples/requests_examples/requests_example_query_parameters.py","file_name":"requests_example_query_parameters.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"2153194928","text":"import logging\nimport unittest\n\nimport mocks\n\nimport snipe.imbroglio as imbroglio\nimport snipe.keymap as keymap\nimport snipe.util as util\nimport snipe.window as window\n\nfrom snipe.chunks import (Chunk)\n\n\nclass TestWindow(unittest.TestCase):\n def test_init(self):\n w = window.Window(mocks.FE())\n w.renderer = mocks.Renderer()\n w.cursor = object()\n x = window.Window(mocks.FE(), prototype=w)\n self.assertIs(w.cursor, x.cursor)\n\n def test_balance_windows(self):\n with mocks.mocked_up_actual_fe_window(window.Window) as w:\n w.split_window()\n w.split_window()\n w.balance_windows()\n self.assertEqual([w.height for w in w.fe.windows], [8, 8, 8])\n\n def test_enlarge_windows(self):\n with mocks.mocked_up_actual_fe_window(window.Window) as w:\n w.split_window()\n w.enlarge_window()\n self.assertEqual([w.height for w in w.fe.windows], [13, 11])\n\n def test_mode(self):\n class AMode:\n cheatsheet = ['foo']\n w = window.Window(None, modes=[AMode()])\n self.assertEqual(w.cheatsheet[-1], 'foo')\n\n @imbroglio.test\n async def test_input_char(self):\n with mocks.mocked_up_actual_fe_window(window.Window) as w:\n w.fe.supervisor = await imbroglio.get_supervisor()\n save = []\n w.intermediate_action = (\n lambda keystroke=None: save.append(keystroke))\n with self.assertLogs(w.log.name, level='ERROR'):\n w.input_char('x')\n self.assertEqual(w.context._message, 'unknown command')\n w.keyerror_action = lambda k: save.append('E' + k)\n w.input_char('x')\n self.assertEqual(save, ['x', 'x', 'Ex'])\n\n w.keymap['y'] = lambda: save.append('y')\n w.input_char('y')\n self.assertEqual(save, ['x', 'x', 'Ex', 'y', 'y'])\n\n w.intermediate_action = None\n w.keymap['z'] = keymap.Keymap()\n w.keymap['z']['Z'] = lambda: save.append('z')\n w.input_char('z')\n w.input_char('Z')\n self.assertEqual(save, ['x', 'x', 'Ex', 'y', 'y', 'z'])\n\n async def key_action():\n save.append('0')\n w.keymap['0'] = key_action\n self.assertFalse(w.tasks)\n w.input_char('0')\n self.assertTrue(w.tasks)\n await w.tasks[0]\n self.assertEqual(save, ['x', 'x', 'Ex', 'y', 'y', 'z', '0'])\n\n w.keymap['1'] = lambda: {}[None]\n\n with self.assertLogs(w.log, logging.ERROR):\n w.input_char('1')\n\n async def key_raises():\n {}[None]\n w.keymap['2'] = key_raises\n 
w.input_char('2')\n with self.assertLogs(w.log, logging.ERROR):\n await w.tasks[0]\n\n def test_misc(self):\n called = False\n\n def destroy_cb():\n nonlocal called\n called = True\n with mocks.mocked_up_actual_fe_window(\n lambda fe: window.Window(fe, destroy=destroy_cb)) as w:\n self.assertEqual(w.focus(), True)\n w.destroy()\n self.assertEqual(called, True)\n\n self.assertEqual(w.title(), 'Window')\n self.assertEqual(w.modeline(), (\n [(set(), 'Window')],\n [({'right'}, '1')]))\n\n def test_search_interface(self):\n with mocks.mocked_up_actual_fe_window(window.Window) as w:\n self.assertRaises(NotImplementedError, lambda: w.find('foo'))\n self.assertRaises(NotImplementedError, lambda: w.match('foo'))\n self.assertRaises(NotImplementedError, w.beginning)\n self.assertRaises(NotImplementedError, w.end)\n self.assertEqual(w.make_mark(None), None)\n\n def test_set_cheatsheet(self):\n w = window.Window(None)\n c = []\n w.set_cheatsheet(c)\n self.assertIs(w.cheatsheet, c)\n self.assertIs(w._normal_cheatsheet, c)\n\n k = keymap.Keymap()\n k.set_cheatsheet(['bar'])\n w.maybe_install_cheatsheet(k)\n self.assertEqual(w.cheatsheet, ['bar'])\n\n def test_cheatsheetify(self):\n cheatsheetify = window.StatusLine.cheatsheetify\n TAGS = window.StatusLine.KEYTAGS\n self.assertEqual(cheatsheetify(''), [])\n self.assertEqual(cheatsheetify('foo'), [(set(), 'foo')])\n self.assertEqual(cheatsheetify('*foo*'), [(TAGS, 'foo')])\n self.assertEqual(\n cheatsheetify('f*o*o'), [(set(), 'f'), (TAGS, 'o'), (set(), 'o')])\n self.assertEqual(cheatsheetify(r'f\\*o'), [(set(), 'f*o')])\n self.assertEqual(cheatsheetify('f**oo'), [(set(), 'foo')])\n self.assertEqual(\n cheatsheetify(r'f*\\*oo'), [(set(), 'f'), (TAGS, '*oo')])\n\n def test_read_string(self):\n import snipe.prompt as prompt\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n i = fe.windows[0].window.read_string(\n ': ', validate=lambda x: x == 'foo')\n i.send(None)\n self.assertIsInstance(fe.windows[1].window, prompt.ShortPrompt)\n with self.assertRaises(util.SnipeException):\n fe.windows[1].window.callback('bar')\n fe.windows[1].window.callback('foo')\n try:\n while True:\n i.send(None)\n except StopIteration as stop:\n val = stop.value\n\n self.assertEquals(val, 'foo')\n\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n i = fe.windows[0].window.read_string(': ', height=5)\n i.send(None)\n self.assertIsInstance(fe.windows[1].window, prompt.LongPrompt)\n fe.delete_window(1)\n with self.assertRaises(Exception):\n while True:\n i.send(None)\n\n def test_read_filename(self):\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n i = fe.windows[0].window.read_filename(': ')\n i.send(None)\n fe.windows[1].window.callback('foo')\n try:\n while True:\n i.send(None)\n except StopIteration as stop:\n val = stop.value\n\n self.assertEquals(val, 'foo')\n\n def test_read_keyseq(self):\n import snipe.prompt as prompt\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n i = fe.windows[0].window.read_keyseq(\n ': ', keymap=fe.windows[0].window.keymap)\n i.send(None)\n self.assertIsInstance(fe.windows[1].window, prompt.KeySeqPrompt)\n fe.windows[1].window.callback('foo')\n try:\n while True:\n i.send(None)\n except StopIteration as stop:\n val = stop.value\n\n self.assertEquals(val, 'foo')\n\n def test_show(self):\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.show('foo')\n self.assertEquals(len(fe.windows), 2)\n self.assertEquals(\n ''.join(\n str(chunk)\n for (mark, chunk) in 
fe.windows[1].window.view(0)),\n 'foo')\n\n def test_quit(self):\n with mocks.mocked_up_actual_fe_window(window.Window) as w:\n self.assertFalse(w.fe.quit)\n w.quit()\n self.assertTrue(w.fe.quit)\n\n def test_stop(self):\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n d = {'called': False}\n fe.sigtstp = lambda *args: d.__setitem__('called', True)\n self.assertFalse(d['called'])\n fe.windows[0].window.stop()\n self.assertTrue(d['called'])\n\n def test_other(self):\n import snipe.editor as editor\n import snipe.messager as messager\n import snipe.repl as repl\n with mocks.mocked_up_actual_fe(window.Window) as fe:\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_window()\n self.assertEquals(len(fe.windows), 2)\n fe.windows[0].window.delete_window()\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_window()\n self.assertEquals(len(fe.windows), 2)\n fe.windows[0].window.delete_other_windows()\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_window()\n self.assertEquals(len(fe.windows), 2)\n self.assertEquals(fe.output, 0)\n fe.windows[0].window.other_window()\n self.assertEquals(fe.output, 1)\n fe.windows[0].window.delete_other_windows()\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_to_editor()\n self.assertEquals(len(fe.windows), 2)\n self.assertEquals(fe.output, 1)\n self.assertIsInstance(fe.windows[1].window, editor.Editor)\n fe.windows[1].window.delete_window()\n self.assertEquals(len(fe.windows), 1)\n\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_to_messager()\n self.assertEquals(len(fe.windows), 2)\n self.assertEquals(fe.output, 1)\n self.assertIsInstance(fe.windows[1].window, messager.Messager)\n fe.windows[1].window.delete_window()\n self.assertEquals(len(fe.windows), 1)\n\n self.assertEquals(len(fe.windows), 1)\n fe.windows[0].window.split_to_repl()\n self.assertEquals(len(fe.windows), 2)\n self.assertEquals(fe.output, 1)\n self.assertIsInstance(fe.windows[1].window, repl.REPL)\n fe.windows[1].window.delete_window()\n self.assertEquals(len(fe.windows), 1)\n\n\nclass TestStatusLine(unittest.TestCase):\n def test(self):\n with mocks.mocked_up_actual_fe_window(\n window.Window, window.StatusLine) as w:\n s = w.context.status\n self.assertEqual([Chunk([\n (('visible',), ''),\n ((), 'Window'),\n (('right',), '1'),\n ((), 'You'),\n ((), ' '),\n ((), 'be'),\n ((), ' '),\n ((), 'this'),\n ((), ' '),\n (('bold',), '^Z'),\n ((), ' suspend'),\n ((), '\\n'),\n ((), \"shouldn't\"),\n ((), ' '),\n ((), 'seeing'),\n ((), ' '),\n (('bold',), '^X^C'),\n ((), ' quit'),\n ((), ' '),\n (('bold',), '?'),\n ((), ' help'),\n ((), '\\n')\n ])], [chunk for (mark, chunk) in s.view(0)])\n\n w.context.conf['set'] = {'cheatsheet': False}\n w.fe.resize_statuswindow()\n self.assertEqual([Chunk([\n (('visible',), ''),\n ((), 'Window'),\n (('right',), '1'),\n ])], [chunk for (mark, chunk) in s.view(0)])\n\n s.message('X' * 80)\n self.assertEqual([Chunk([\n ({'visible'}, ''),\n ({'fg:white', 'bg:red'}, '…' + ('X' * 77)),\n ({'right'}, '1'),\n ])], [chunk.tagsets() for (mark, chunk) in s.view(0)])\n\n # defensiveness time\n\n class W2(window.Window):\n def modeline(self):\n return None, []\n\n s.clear()\n w.fe.split_window(W2(w.fe), True)\n\n self.assertEqual([[\n ({'visible'}, ''),\n ]], [chunk for (mark, chunk) in s.view(0)])\n\n w.fe.output = 99\n self.assertEqual([Chunk([\n (('visible',), ''),\n ((), 'StatusLine'),\n (('right',), '1'),\n ])], [chunk for (mark, chunk) in s.view(0)])\n\n\nif __name__ == 
'__main__':\n    unittest.main()\n","repo_name":"1ts-org/snipe","sub_path":"tests/test_window.py","file_name":"test_window.py","file_ext":"py","file_size_in_byte":12082,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"}
+{"seq_id":"86263558872","text":"import time\nimport scrapy\nfrom scrapy.http import FormRequest\nfrom scrapy.utils.response import open_in_browser\n\nclass UsersSpider(scrapy.Spider):\n    name = 'quote'\n    start_urls = [\n        'https://www.strava.com/login'\n    ]\n\n    def parse(self, response):\n        token = response.css('meta::attr(content)').extract_first()\n        print(token)\n        return FormRequest.from_response(response, formdata={\n            'csrf_token': token,\n            'username': 'anika.bhad@ncsu.edu',\n            'password': 'Password'\n        }, callback=self.start_scraping)\n\n    def start_scraping(self, response):\n        # Number of pages that are going to be crawled\n        depth = 10\n        # Loop going from 1-10 crawling each page referenced\n        for pageNumber in range(depth):\n            # yield response.follow(x,callback=self.parse) is the\n            # function to crawl the page. This means each page in this loop will be crawled\n            yield response.follow('https://www.strava.com/athletes/search?gsf=1&page=' + str(pageNumber + 1) + '&page_'\n                                  + 'uses_modern_javascript=true&text=&utf8=%E2%9C%93', callback=self.parse)\n        all_users = response.css('li.row')\n        for user in all_users:\n            name = user.css('div.text-headline::text').extract()\n            location = user.css('div.location::text').extract()\n\n            objToYield = {'name': name, 'location': location, 'crawlTime': time.time()}\n            yield objToYield\n\n\n","repo_name":"albhadri/StravaScrapyFinal","sub_path":"stravaScrape/stravaScrape/spiders/spiderBackup.py","file_name":"spiderBackup.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25057420583","text":"import re\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nimport tensorflow as tf\nfrom word_preprocessing import get_path, clean, pre_processing, MyTokenizer\nfrom model import SentimentalClassification\n\ntrain_path = get_path('archive/train.txt')\ntest_path = get_path('archive/test.txt')\nval_path = get_path('archive/val.txt')\n\n\n\nX_train, y_train = pre_processing(train_path)\nX_test, y_test = pre_processing(test_path)\nX_val, y_val = pre_processing(val_path)\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\n#tokenizer = Tokenizer()\n#tokenizer.fit_on_texts(pd.concat([X_train, X_test], axis=0))\ntemp = MyTokenizer()\ntokenizer = temp.myTokenizer()\n\ntrain_seq = tokenizer.texts_to_sequences(X_train)\ntest_seq = tokenizer.texts_to_sequences(X_test)\nval_seq = tokenizer.texts_to_sequences(X_val)\n\nX_train = pad_sequences(train_seq, maxlen=256, truncating='pre')\nX_test = pad_sequences(test_seq, maxlen=256, truncating='pre')\nX_val = pad_sequences(val_seq, maxlen=256, truncating='pre')\n\nvocabSize = len(tokenizer.index_word) + 1\n\nmaxlen = X_train.shape[1]\nembedding_size = 200\nfilters = 128\n\nclf = SentimentalClassification(vocabSize, embedding_size, maxlen, filters=filters, lstm_size=128)\nprint(clf.model.summary())\nclf.compile_model()\nwith tf.device(\"/device:GPU:0\"):\n    fit= clf.model.fit(X_train,\n                       y_train,\n                       validation_data=(X_val, y_val),\n                       verbose=1,\n                       batch_size=256,\n                       epochs=30,\n                       callbacks=[clf.early_stop]\n                       )\n\nclf.model.evaluate(X_test, y_test, 
verbose=1)\n\n#clf.model.save('SentimentalClassification.h5')","repo_name":"augustinLib/emoji_recommendation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"33607104073","text":"'''\r\nCreated on Apr 22, 2018\r\n\r\n@author: varunjai\r\n'''\r\nfrom com.varun.player.AIPlayer import AIPlayer\r\n'''\r\nConfig\r\n'''\r\n\r\nimport os\r\n\r\nfrom flask import Flask, render_template, request, session\r\nfrom flask_socketio import SocketIO\r\n\r\nfrom com.varun.game.Game import Game\r\nfrom com.varun.game.PlayerData import PlayerData\r\n\r\n# configuration\r\ngame_server = Flask(__name__)\r\ngame_server.session_key = str(os.urandom(24))\r\ngame_server.config['SECRET_KEY'] = 'secret!'\r\nsocketio = SocketIO(game_server)\r\n\r\ngame = None\r\nmp_players_data = []\r\nsp_players_data = []\r\naiplayer = None\r\n\r\n\r\n@socketio.on('move_event')\r\ndef handle_message(message):\r\n    print('received message: ' + message)\r\n\r\n\r\n@game_server.route('/')\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n\r\n@game_server.route('/login')\r\ndef login():\r\n    mode = request.args.get('mode')\r\n    if(session and 'user' in session):\r\n        if(mode == 'single-player'):\r\n            return start_sp_game()\r\n        else:\r\n            return __start(mp_players_data, 'startmp.html')\r\n\r\n    return render_template('login.html')\r\n\r\n\r\n@game_server.route('/startmp', methods=['POST'])\r\ndef start_game():\r\n    global mp_players_data\r\n    # register users (compare usernames by value, not identity)\r\n    if('user' not in session or session['user'] != request.form['user']):\r\n        mp_players_data.append(PlayerData(request.form['user'], 'X' if len(mp_players_data) == 0 else 'O'))\r\n        session['user'] = request.form['user']\r\n\r\n    return __start(mp_players_data, 'startmp.html')\r\n\r\n\r\n'''\r\nStart a single player game.\r\nThis creates an instance of AI player as the second instance of the player\r\n\r\n\r\nIf the game is set to None, it means that even if the users are present\r\nthe game has been reset. 
This is an instance where a user is playing another game\r\n'''\r\n\r\n\r\n@game_server.route('/startsp', methods=['POST'])\r\ndef start_sp_game():\r\n    global sp_players_data\r\n    global aiplayer\r\n    global game\r\n\r\n    # if two players already added\r\n    if(len(sp_players_data) == 2):\r\n        # if its not a new game\r\n        if(game is not None and aiplayer is not None):\r\n            return __start(sp_players_data, 'startsp.html')\r\n        else:\r\n            result = __start(sp_players_data, 'startsp.html')\r\n            aiplayer = AIPlayer('AI', 'O', game)\r\n            return result\r\n\r\n    # register users (compare usernames by value, not identity)\r\n    if('user' not in session or session['user'] != request.form['user']):\r\n        sp_players_data.append(PlayerData(request.form['user'], 'X'))\r\n        session['user'] = request.form['user']\r\n    sp_players_data.append(PlayerData('AI', 'O'))\r\n    result = __start(sp_players_data, 'startsp.html')\r\n    aiplayer = AIPlayer('AI', 'O', game)\r\n    return result\r\n\r\n\r\ndef __start(players_data, template):\r\n\r\n    # wait if mp_players_data are inadequate\r\n    if(len(players_data) != 2):\r\n        print('Waiting for other player to join')\r\n        return render_template('wait.html')\r\n\r\n    # launch game\r\n    global game\r\n    game = Game(players_data[0], players_data[1], 3)\r\n    socketio.emit('start_event')\r\n    return render_template(template)\r\n\r\n\r\n'''\r\nJoin the game\r\n'''\r\n\r\n\r\n@game_server.route('/join')\r\ndef join_game():\r\n    if(len(mp_players_data) != 2):\r\n        print('Waiting for other player to join')\r\n        return render_template('wait.html')\r\n    return render_template('startmp.html')\r\n\r\n\r\n'''\r\nGet the player who is having the current turn\r\n'''\r\n\r\n\r\n@game_server.route('/currentplayer')\r\ndef current_player():\r\n    global game\r\n    if(game is None):\r\n        return '', 401\r\n    return game.get_current_player().get_player_name(), 200\r\n\r\n\r\n@game_server.route('/move')\r\ndef move_mp():\r\n    global game\r\n    # if not the player with current turn\r\n    if(session['user'] != game.get_current_player().get_player_name()):\r\n        return '', 201\r\n\r\n    # check params\r\n    x = request.args.get('x')\r\n    y = request.args.get('y')\r\n\r\n    return __move(int(x), int(y))\r\n\r\n\r\n'''\r\nMake the move on the board\r\n'''\r\n\r\n\r\ndef __move(x: int, y: int):\r\n\r\n    if(x is None or y is None):\r\n        return '', 400\r\n\r\n    # make move\r\n    global game\r\n    result = game.move(x, y)\r\n    # invalid move\r\n    if(result is None or result == ''):\r\n        return '', 201\r\n\r\n    # valid move\r\n    socketio.emit('move_event', data=\"x:\" + str(x) + \" y:\" + str(y) + \" style:\" + result)\r\n    return '', 201\r\n\r\n\r\n@game_server.route('/movesp')\r\ndef move_sp():\r\n\r\n    if(game is None):\r\n        return '', 401\r\n    \r\n    if(game.get_current_player().get_player_name() == 'AI'):\r\n        # AI player move\r\n        global aiplayer\r\n        move_param = aiplayer.move()\r\n\r\n        if(move_param is None):\r\n            return '', 201\r\n        return __move(move_param.getXval(), move_param.getYval())\r\n\r\n    # if not the player with current turn and not AI\r\n    if(session['user'] != game.get_current_player().get_player_name()):\r\n        return '', 201\r\n\r\n    # check params\r\n    x = request.args.get('x')\r\n    y = request.args.get('y')\r\n    return __move(int(x), int(y))\r\n\r\n\r\n@game_server.route('/checkgame')\r\ndef check_game():\r\n    global game\r\n    return game.checkGame(), 200\r\n\r\n\r\n@game_server.route('/exitgame')\r\ndef exit_game():\r\n    print(\"exiting game session\")\r\n    global game\r\n    global aiplayer\r\n    game = None\r\n    
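# clear the AI player too so the next session starts fresh\r\n    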
aiplayer = None\r\n    return render_template('index.html')\r\n\r\n\r\n'''\r\nShutdown code\r\n'''\r\n\r\n\r\ndef shutdown_server():\r\n    if(session and 'user' in session):\r\n        session.pop('user', None)\r\n    \r\n    exit_game()\r\n    func = request.environ.get('werkzeug.server.shutdown')\r\n    if func is None:\r\n        raise RuntimeError('Not running with the Werkzeug Server')\r\n    func()\r\n\r\n\r\n@game_server.route(\"/shutdown\")\r\ndef shutdown():\r\n    shutdown_server()\r\n    return \"OK\", 200\r\n\r\n\r\n'''\r\nStart server\r\n'''\r\nif __name__ == '__main__':\r\n    if(session and 'user' in session):\r\n        session.pop('user', None)\r\n\r\n    socketio.run(game_server, host='0.0.0.0', port=5000)\r\n","repo_name":"varmax2511/tictactoe-online","sub_path":"tictactoe/com/varun/ux/server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"25065294136","text":"#!/usr/bin/python\n\n# find the length of the longest substring with unique characters of a given string:\n\n#s = ''\ns = 'aajakubj'\n\ndef lengthOfLongestSubstring(s):\n    le = 0\n    re = \"\"\n    i = 0\n    while i < len(s):\n        if re == \"\" and i+1 < len(s):\n            poin = i + 1\n        if s[i] not in re:\n            re += s[i]\n            i += 1\n            #le = len(re)\n        else:\n            if len(re) > le:\n                le = len(re)\n            re = \"\"\n            i = poin\n    if len(re) > le:\n        return len(re)\n    else:\n        return le\n\n# re-coding this better with sliding window option:\ndef lengthOfLongestSubstring2(s):\n    # initiate a list:\n    res = []\n    m = 0\n    for i in range(len(s)):\n        if s[i] not in res:\n            res.append(s[i])\n        else:\n            j = res.index(s[i])\n            res = res[j+1:]\n            res.append(s[i])\n        m = max(m, len(res))\n    return m\n\n\n    \n\nx = lengthOfLongestSubstring2('abcabcbb')\nprint(x)\n","repo_name":"jkfer/LeetCode","sub_path":"longest_substring.py","file_name":"longest_substring.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"21467580617","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n# define driver and browser\ndriver = webdriver.Safari()\ndriver.get('https://www.seleniumeasy.com/test/basic-first-form-demo.html')\n\n\ndef wait(element):\n\ttry:\n\t\telement1 = WebDriverWait(driver, 10).until(\n\t\t\tEC.presence_of_element_located((By.ID, element)))\n\t\treturn element1\n\texcept:\n\t\tprint(\"Exception might have occurred......??????\")\n\n\n# check for correct title text\nassert 'Selenium Easy' in driver.title\n\n# enter some text using send_keys\ntest_text = 'WAYHEYYYYYYY!!!!!'\nuser_message = driver.find_element_by_id('user-message')\nuser_message.clear()\nuser_message.send_keys(test_text)\n# print(user_message)\n\n# click on a button\nshow_msg_button = driver.find_element_by_class_name('btn-default')\n# print(show_msg_button.get_attribute('innerHTML'))\nshow_msg_button.click()\n\n# check that the correct text was output when the button was pressed\noutput_message = driver.find_element_by_id(\"display\")\n# print(output_message.text)\nassert test_text in output_message.text\n\n\ndriver.quit()\n","repo_name":"bobomatic/Selenium_demo","sub_path":"automation.py","file_name":"automation.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"353536587","text":"'''\nImageFiles Module\n\nThis module contains tools to work easily with image files.\n\n findImgsInDir(dirPath,fileType=None,searchPhrase=None)\n\n - Looks inside a directory for files (like images) of a specific\n file type and contain an optional key phrase in the file name.\n\n findSubDirs(searchPath)\n\n - Returns all sub directories directly under the folder\n indicated by the user\n\n makedir(dir2make)\n\n - Creates a new desired folder\n\n makeSoftLink(file2Link,linkPath)\n\n - Creates a softlink for a file\n\n getFieldNumber(fieldName)\n\n - Returns the number of this field of view\n\n getMetadata(imp)\n\n - Reads the metadata for an image file\n\n saveCompressedImg(img,path)\n\n - Saves an image with data compression\n\n openVirtualStack(path)\n\n - Opens image file as a virtual stack\n\n'''\n\n########################################################################\n############################ IMPORT PACKAGES ###########################\n########################################################################\n\n# Import glob so we can list out the contents of a directory, avoiding\n# hidden files\nimport glob\n\n# Import os so we can join file path elements, and check to see if\n# something is a file or directory\nimport os\n\n# Import regular expressions so we can search strings for specific\n# information\nimport re\n\n# Import bio-formats image reader and MetadataTools so we can work with\n# image metadata\nfrom loci.formats import CoreMetadata, MetadataTools\n\n# Import Hashtable from java so we can create metadata maps\nfrom java.util import Hashtable\n\n# Import IJ so we can figure out the ImageJ version\nfrom ij import IJ\n\n# Import length from bio formats\nfrom ome.units.quantity import Length\n\n# Import bio formats' units class\nfrom ome.units import UNITS\n\n# Import bio formats' positive float class\nfrom ome.xml.model.primitives import PositiveFloat\n\n# Import bio-format's Tiff writer\nfrom loci.formats.out import TiffWriter\n\n# Import some image tools from bio-formats so we can convert ImagePlus\n# images into something readable by bio-formats\nfrom loci.formats.gui import AWTImageTools as tools\n\n########################################################################\n############################ findImgsInDir #############################\n########################################################################\n\n# Define function to find image files within a directory\ndef findImgsInDir(dirPath,fileType=None,searchPhrase=None,searchSubDirs=False):\n '''\n Looks inside a directory for files (like images) of an optional file\n type that contain an optional key phrase in the file name.\n\n findImgsInDir(dirPath,fileType='tif',searchPhrase=None)\n\n - dirPath (String): Path to directory within which you would\n like to search for your files\n\n - fileType (String): File type for the files you are trying to\n locate, optional (default = None)\n\n - searchPhrase (String): Regular expression that is contained\n within the file name of the files you\n are trying to locate, optional\n (default = None)\n\n - searchSubDirs (Boolean): Do you want to search sub-folders\n recursively? 
(default = False, don't\n                               search subfolders)\n\n    OUTPUT\n\n        - files (List of Strings): Paths to all files in directory that\n                                   match our desired file type and\n                                   search phrase\n\n    AR Oct 2021\n    AR Jan 2022: Changed searchPhrase to a regular expression\n    AR Mar 2022: Added option to search sub folders\n    '''\n\n    # If dirPath was provided in unicode, convert to String\n    if isinstance(dirPath,unicode):\n        dirPath = str(dirPath)\n\n    # Define a function to check the file type of a file\n    def is_file_type(file_path):\n        '''\n        Check to see if a given file has the correct file type\n\n        is_file_type(file)\n\n        - file_path (String): Path to file that needs the file type\n                              checked\n\n        OUTPUT True if file is of our desired file type\n\n        AR 10/21\n        '''\n\n        # Check to make sure file types were specified\n        if fileType is None:\n\n            # If no file types were specified, all files are fair game\n            return True\n\n        else:\n\n            # Check to see if the image file name ends with the\n            # specified file type\n            if file_path.endswith(fileType):\n                return True\n            else:\n                return False\n\n    # Define a function to check to see if a file name contains our\n    # desired key phrase\n    def has_search_phrase(file_path):\n        '''\n        Check to see if a file contains our desired search phrase\n\n        has_search_phrase(file_path)\n\n        - file_path (String): Path to file that needs the file type\n                              checked\n\n        OUTPUT True if file contains our key search phrase\n\n        AR 10/21\n        '''\n\n        # Check to see if we have a desired search phrase\n        if searchPhrase is None:\n\n            # If there is no search phrase to look for, all files are\n            # fine\n            return True\n\n        else:\n\n            # Make a regular expression out of the search phrase\n            regexp = re.compile(searchPhrase)\n\n            # Check to see if the search phrase is present in the image\n            # file name\n            if regexp.search(file_path) is None:\n                return False\n            else:\n                return True\n\n    # Initialize a list storing all of the files in our directory of the\n    # correct file type and that contain our desired key phrase\n    files2return = []\n\n    # If we are searching through sub-directories ...\n    if searchSubDirs:\n\n        # Use os.walk to loop across all files in the input directory and\n        # its sub directories\n        for subDir, _, files in os.walk(dirPath):\n\n            # Loop across all files\n            for file_name in files:\n\n                # Concatenate the directory's path with each file name\n                full_path = os.path.join(subDir,file_name)\n\n                # Check the file type\n                if is_file_type(file_name):\n\n                    # Check to see if the file's path contains our\n                    # desired key phrase\n                    if has_search_phrase(full_path):\n\n                        # If this file passes these checks, we should\n                        # return it\n                        files2return.append(full_path)\n\n    else:\n\n        # Use glob to list out the contents of our search directory\n        for file_name in glob.glob(os.path.join(dirPath,'*')):\n\n            # Concatenate the directory's path with each file name\n            full_path = os.path.join(dirPath,file_name)\n\n            # Check to make sure this content is a file rather than a\n            # directory\n            if os.path.isfile(full_path) or os.path.islink(full_path):\n\n                # Check the file type\n                if is_file_type(file_name):\n\n                    # Check to see if the file contains our desired key\n                    # phrase\n                    if has_search_phrase(file_name):\n\n                        # If this file passes these checks, we should return\n                        # it\n                        files2return.append(full_path)\n\n    # Check to see if there was only one file to return\n    if len(files2return) == 1:\n\n        # Return just that one file as a string instead of list\n        return files2return[0]\n\n    # Otherwise, if there are multiple files to return, just return full\n    # list\n    return 
files2return\n\n########################################################################\n############################## findSubDirs #############################\n########################################################################\n\n# Define a function that will return all sub directories under a path\ndef findSubDirs(searchPath):\n    '''\n    Returns all sub directories directly under the folder indicated by\n    the user\n\n    findSubDirs(searchPath)\n\n        - searchPath (String): Path to folder under which you would like\n                               to find sub directories\n\n    OUTPUT list of strings giving the names of all sub directories under\n    searchPath\n\n    AR Oct 2021\n    '''\n\n    # Use os.listdir to get all contents under searchPath, and check to\n    # see what are subdirectories\n    subdirs = [dir for dir in os.listdir(searchPath) if os.path.isdir(os.path.join(searchPath,dir))]\n\n    # If there was more than one sub directory...\n    if len(subdirs) > 1:\n\n        # ... return the whole list\n        return subdirs\n\n    # If there was only one sub directory ...\n    else:\n\n        # ... return that sub directory\n        return subdirs[0]\n\n########################################################################\n################################ makedir ###############################\n########################################################################\n\n# Define a function that will make new directories\ndef makedir(dir2make):\n    '''\n    Creates a new desired folder\n\n    makedir(dir2make)\n\n        - dir2make (String): Path to the location of the new directory\n                             you want to make\n\n    Will first check to see if the directory you are trying to make\n    already exists. If it doesn't already exist, this function will make\n    the folder.\n\n    AR Oct 2021\n    '''\n\n    # Check to see if the folder already exists\n    if not os.path.exists(dir2make):\n\n        # Make the folder if it doesn't already exist\n        os.makedirs(dir2make)\n\n########################################################################\n############################# makeSoftLink #############################\n########################################################################\n\n# Define a function for making soft links\ndef makeSoftLink(file2Link,linkPath):\n    '''\n    Creates a softlink for a file\n\n    makeSoftLink(file2Link,linkPath)\n\n        - file2Link (String): Path to the file you want to make a\n                              softlink to\n\n        - linkPath (String): File path to where you want to make your\n                             soft link\n\n    AR Oct 2021\n    '''\n\n    # Store the directory where the link will be saved\n    linkDir = os.path.dirname(linkPath)\n\n    # Change the current working directory to where we want to make the\n    # soft link\n    os.chdir(linkDir)\n\n    # Store the relative path from where the file to be linked is\n    # located to where we want to make the soft link\n    linkRelPath = os.path.relpath(file2Link,linkDir)\n\n    # Create the softlink\n    os.symlink(linkRelPath,linkPath)\n\n########################################################################\n############################ getFieldNumber ############################\n########################################################################\n\n# Define a function to get the field of view number\ndef getFieldNumber(fieldName):\n    '''\n    Returns the number of this field of view\n\n    getFieldNumber(fieldName)\n\n        - fieldName (String): File name of the field of view\n\n    OUTPUT the number of this field of view as an integer\n\n    AR Dec 2021\n    AR Feb 2022: Updated since we're no longer numbering fields by row\n                 or column\n    '''\n\n    # Define a regular expression to identify the field of view number\n    regex = 
\n\n########################################################################\n############################ getFieldNumber ############################\n########################################################################\n\n# Define a function to get the field of view number\ndef getFieldNumber(fieldName):\n '''\n Returns the number of this field of view\n\n getFieldNumber(fieldName)\n\n - fieldName (String): File name of the field of view\n\n OUTPUT the number of this field of view as an integer\n\n AR Dec 2021\n AR Feb 2022: Updated since we're no longer numbering fields by row\n or column\n '''\n\n # Define a regular expression to identify the field of view number\n regex = re.compile('.*Field-(?P<Field_of_View_Number>\d+)_.*')\n\n # Match the string pattern with our field of view name\n matches = regex.match(str(fieldName))\n\n # Return the field of view number as an integer\n return int(matches.groupdict()['Field_of_View_Number'])\n\n########################################################################\n############################## getMetadata #############################\n########################################################################\n\n# Write a function that will get the bio-formats meta data for an image\ndef getMetadata(imp):\n '''\n Reads the metadata for an image file\n\n getMetadata(imp)\n\n - imp (Fiji ImagePlus): Image you want to create metadata for\n\n OUTPUT MetadataStore object from the bio-formats java library\n containing the metadata for this image\n\n AR Jan 2022\n '''\n\n # Initialize an object to store core metadata\n core = CoreMetadata()\n\n # Store the file info from the ImagePlus\n impFileInfo = imp.getFileInfo()\n\n # Use the image plus object to extract the image properties to add\n # to the metadata\n core.bitsPerPixel = imp.getBytesPerPixel() * 8 # A byte is a group\n # of 8 bits\n core.dimensionOrder = 'XYZTC'\n core.imageCount = impFileInfo.nImages\n core.littleEndian = impFileInfo.intelByteOrder\n core.pixelType = imp.getBytesPerPixel()\n core.rgb = imp.getBitDepth() == 24 # 24 bit depth for ImagePlus is\n # for RGB images\n core.sizeC = imp.getNChannels()\n core.sizeT = imp.getNFrames()\n core.sizeX = imp.getWidth()\n core.sizeY = imp.getHeight()\n core.sizeZ = imp.getNSlices()\n\n # Create a metadata map for this image\n metaMap = Hashtable()\n\n # Grab the calibration for this image\n impCalibration = imp.getCalibration()\n\n # Store the current ImageJ version\n ImageJVersion = IJ.getVersion()\n\n # Add in metadata for our image into our metadata map\n metaMap.put('ImageLength',imp.getHeight())\n metaMap.put('XResolution',impCalibration.getX(1))\n metaMap.put('ImageJ',ImageJVersion[ImageJVersion.index('/')+1:])\n metaMap.put('YResolution',impCalibration.getY(1))\n metaMap.put('ResolutionUnit',impCalibration.getUnit())\n metaMap.put('Unit',impCalibration.getUnit())\n metaMap.put('NumberOfChannels',imp.getNChannels())\n metaMap.put('BitsPerSample',imp.getBytesPerPixel() * 8)\n metaMap.put('ImageWidth',imp.getWidth())\n metaMap.put('SamplesPerPixel',impFileInfo.samplesPerPixel)\n\n # Add the metadata map to the core metadata\n core.seriesMetadata = metaMap\n\n # Initialize an OME-XML metadata storage object\n meta = MetadataTools.createOMEXMLMetadata()\n\n # Populate this OME-XML metadata storage using our core metadata\n MetadataTools.populateMetadata(meta,0,imp.getTitle(),core)\n\n # Add the physical pixel size to the image metadata\n # TODO: Don't hard code the physical unit (microns are assumed here)\n meta.setPixelsPhysicalSizeX(Length(impCalibration.getX(1),UNITS.MICROMETER),0)\n meta.setPixelsPhysicalSizeY(Length(impCalibration.getY(1),UNITS.MICROMETER),0)\n meta.setPixelsPhysicalSizeZ(Length(impCalibration.getZ(1),UNITS.MICROMETER),0)\n\n # Return the metadata\n return meta
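\n\n# A sketch of how this pairs with the saving and loading helpers defined\n# below (the file paths are hypothetical; assumes a Fiji/Jython session):\n# imp = openVirtualStack('/data/field01.tif')\n# meta = getMetadata(imp)\n# saveCompressedImg(imp, meta, '/data/field01_compressed.tif')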
\n\n########################################################################\n########################### saveCompressedImg ##########################\n########################################################################\n\n# Write a function that will save an image with compression\ndef saveCompressedImg(img,metaData,outFile):\n '''\n Saves an image with data compression\n\n saveCompressedImg(img,metaData,outFile)\n\n - img (ImagePlus): Image you want to save\n\n - metaData (Bio-Formats MetadataStore): Metadata for the image\n you are saving\n\n - outFile (String): File path to where you want to save the image\n\n AR Jan 2022\n '''\n\n # Initialize a bio-formats image writer object\n writer = TiffWriter()\n\n # Add the metadata to this writer\n writer.setMetadataRetrieve(metaData)\n\n # Instruct the writer to save the image with compression\n writer.setCompression(\"zlib\")\n\n # Set the file location to write to\n writer.setId(outFile)\n\n # loop across all planes of the image\n for p in range(img.getNSlices()):\n\n # Set the current z-slice of the image\n img.setSliceWithoutUpdate(p+1)\n\n # Crop the current z-slice of the image\n curSlice = img.crop()\n\n # Convert this plane into a java buffered image\n bufrdimg = curSlice.getBufferedImage()\n\n # Convert the buffered image into a bytes array\n plane = tools.getBytes(bufrdimg)\n\n # Save the bytes from this plane to the file\n writer.saveBytes(p,plane[0])\n\n # Close the writer object\n writer.close()\n\n########################################################################\n########################### openVirtualStack ###########################\n########################################################################\n\n# Define a function to open image files as virtual stacks to save memory\ndef openVirtualStack(path):\n '''\n Opens image file as a virtual stack\n\n openVirtualStack(path)\n\n - path (String): File path to image you want to open as a\n virtual stack\n\n OUTPUT ImagePlus object containing the virtual stack\n\n AR Feb 2023\n '''\n\n # Open the image using bio-formats\n IJ.run(\"Bio-Formats\",\n 'open={} color_mode=Default rois_import=[ROI manager] view=Hyperstack stack_order=XYCZT use_virtual_stack'.format(path));\n\n # Grab the image plus object\n imp = IJ.getImage()\n\n # Hide the image plus object\n imp.hide()\n\n # Return the resulting image plus object\n return imp
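\n\n# Note: tools.getBytes returns one byte array per channel, and the loop in\n# saveCompressedImg writes plane[0] only, so as written it keeps just the\n# first channel of each plane; multi-channel images would need a loop over\n# the returned arrays.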
(Y/N): \").upper()\n\n if confirm == \"Y\":\n print(\"Visualise, elija y decida opciones para su viaje: \")\n user_choice.verTablas()\n elif confirm == \"N\":\n bienvenida()\n else:\n print(\"Ingrese un valor valido, por favor.\")\n\n elif choice == 2:\n add_client.agregarCliente()\n bienvenida()\n\n elif choice != type(int):\n print(\"Ingrese un valor valido, por favor.\")","repo_name":"peculiarchild/Base_de_Datos_Empernar.com","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31382796798","text":"from __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport time\r\nimport argparse\r\nimport numpy as np\r\nimport random\r\nfrom tqdm import tqdm\r\nimport pandas as pd\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\n\r\nfrom GraphSage.encoders import Encoder\r\nfrom GraphSage.aggregators import MeanAggregator\r\nfrom GraphSage.model import SupervisedGraphSage\r\nfrom utils_NEDA import load_data, EarlyStopping, accuracy, get_excel_name\r\n\r\nparser = argparse.ArgumentParser(\"\")\r\nparser.add_argument('--no-cuda', action='store_true', default=False,\r\n help='Disables CUDA training.')\r\nparser.add_argument('--gcn', action='store_true', default=False,\r\n help='Determine the aggregation method.')\r\nparser.add_argument('--fastmode', action='store_true', default=True,\r\n help='Validate during training pass.')\r\nparser.add_argument('--seed', type=int, default=0, help='Random seed.')\r\nparser.add_argument('--epochs', type=int, default=5000,\r\n help='Number of epochs to train.')\r\nparser.add_argument('--lr', type=float, default=0.01,\r\n help='Initial learning rate.')\r\nparser.add_argument('--weight_decay', type=float, default=5e-4,\r\n help='Weight decay (L2 loss on parameters).')\r\nparser.add_argument('--hidden', type=int, default=32,\r\n help='Number of hidden units.')\r\nparser.add_argument('--early-stop', action='store_true', default=True,\r\n help=\"indicates whether to use early stop or not\")\r\nparser.add_argument('--cuda_device', type=int, default=0, help='Cuda device')\r\nparser.add_argument('--patience', type=int, default=100, help='Patience')\r\nparser.add_argument('--similarity', action='store_true', default=False, help='For comparison with baseline')\r\nparser.add_argument('--infected_number', type=int, default=15, help=\"Number of infected individuals\")\r\nparser.add_argument('--is_copy', action='store_true', default=False, help='Whether the attributes of the infected person are copied')\r\nparser.add_argument('--sample1', type=int, default=5, help=\"Number of neighbors for first-order sampling\")\r\nparser.add_argument('--sample2', type=int, default=10, help=\"Number of neighbors for second-order sampling\")\r\nparser.add_argument('--train_percentage', type=float, default=0.6, help=\"The proportion of the training set\")\r\nparser.add_argument('--val_percentage', type=float, default=0.2, help=\"The proportion of the validation set\")\r\nparser.add_argument('--dataset', type=str, default='wisconsin')\r\nargs = parser.parse_args()\r\n\r\ndef experiment(args):\r\n args.cuda = not args.no_cuda and torch.cuda.is_available()\r\n dataset = args.dataset\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n random.seed(args.seed)\r\n if args.cuda:\r\n torch.cuda.manual_seed(args.seed)\r\n 
\r\ndef experiment(args):\r\n    args.cuda = not args.no_cuda and torch.cuda.is_available()\r\n    dataset = args.dataset\r\n    np.random.seed(args.seed)\r\n    torch.manual_seed(args.seed)\r\n    random.seed(args.seed)\r\n    if args.cuda:\r\n        torch.cuda.manual_seed(args.seed)\r\n        torch.cuda.set_device(args.cuda_device)\r\n    if args.early_stop:\r\n        # use the patience value exposed on the command line rather than a\r\n        # hard-coded 100\r\n        stopper = EarlyStopping(patience=args.patience)\r\n\r\n    # Load data\r\n    feat_data, labels, adj_lists, train, val, test, extended_neighborhood_coefficient \\\r\n        = load_data(dataset,\r\n                    args.infected_number,\r\n                    args.similarity,\r\n                    args.is_copy,\r\n                    args.train_percentage,\r\n                    args.val_percentage)\r\n\r\n    features = nn.Embedding(*feat_data.shape)\r\n    features.weight = nn.Parameter(torch.FloatTensor(feat_data), requires_grad=False)\r\n\r\n    agg1 = MeanAggregator(features, cuda=args.cuda, gcn=args.gcn, similarity=args.similarity)\r\n    enc1 = Encoder(features, feat_data.shape[1], args.hidden, adj_lists, agg1, args.sample1, gcn=args.gcn, cuda=args.cuda)\r\n    agg2 = MeanAggregator(lambda nodes: enc1(nodes).t(), cuda=args.cuda, gcn=args.gcn, similarity=args.similarity)\r\n    enc2 = Encoder(lambda nodes: enc1(nodes).t(), args.hidden, args.hidden, adj_lists, agg2, args.sample2,\r\n                   base_model=enc1, gcn=args.gcn, cuda=args.cuda)\r\n\r\n    graphsage = SupervisedGraphSage(labels.max().item() + 1, enc2)\r\n    loss_fcn = torch.nn.CrossEntropyLoss()\r\n    optimizer = optim.Adam(filter(lambda p: p.requires_grad, graphsage.parameters()),\r\n                           lr=args.lr,\r\n                           weight_decay=args.weight_decay)\r\n\r\n    for epoch in range(args.epochs):\r\n        t0 = time.time()\r\n        graphsage.train()\r\n        train_batch_nodes = train  # train[:256]\r\n        random.shuffle(train)\r\n\r\n        # forward\r\n        train_logits = graphsage(train_batch_nodes)\r\n        train_loss = loss_fcn(train_logits, Variable(torch.LongTensor(labels[np.array(train_batch_nodes)])))\r\n\r\n        optimizer.zero_grad()\r\n        train_loss.backward()\r\n        optimizer.step()\r\n\r\n        train_acc = accuracy(train_logits, labels[train_batch_nodes])\r\n\r\n        graphsage.eval()\r\n        val_batch_nodes = val\r\n        with torch.no_grad():\r\n            val_logits = graphsage(val_batch_nodes)\r\n            val_acc = accuracy(val_logits, labels[val_batch_nodes])\r\n        if args.early_stop:\r\n            if stopper.step(val_acc, graphsage):\r\n                break\r\n        if args.fastmode:\r\n            continue\r\n        else:\r\n            print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |\"\r\n                  \" ValAcc {:.4f}\".format(epoch, time.time() - t0, train_loss.item(), train_acc, val_acc))\r\n\r\n    if args.early_stop:\r\n        graphsage.load_state_dict(torch.load('es_checkpoint.pt'))\r\n    test_logits = graphsage(test)\r\n    test_acc = accuracy(test_logits, labels[test])\r\n    print(\"Test Accuracy {:.4f}\".format(test_acc))\r\n    return test_acc, extended_neighborhood_coefficient\r\n\r\ndef experiment_average(number_experiments):\r\n    t0 = time.time()\r\n    acc_result = []\r\n    enc_list = []\r\n    for i in tqdm(range(number_experiments)):\r\n        args.seed = i\r\n        # print(args)\r\n        test_acc, extended_neighborhood_coefficient = experiment(args)\r\n\r\n        enc_list.append(pd.Series(extended_neighborhood_coefficient).to_frame(str(i)))\r\n        acc_result.append(test_acc)\r\n\r\n    print()\r\n    print(\"{}_{}_Average_accuracy:{:.4f}~{:.4f} | Time(s) {:.4f}\".format(args.dataset,\r\n                                                                         get_excel_name(args.similarity, args.is_copy),\r\n                                                                         np.array(acc_result).mean(),np.array(acc_result).std(),\r\n                                                                         time.time() - t0))\r\n    for i in range(1, number_experiments):\r\n        enc_list[0] = pd.merge(enc_list[0], enc_list[i], left_index=True, right_index=True, how='outer')\r\n    return round(np.array(acc_result).mean(),4)\r\n
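\r\n# Note: the early-stopping flow above assumes the EarlyStopping helper from\r\n# utils_NEDA saves its best weights to 'es_checkpoint.pt' (the file that\r\n# torch.load reads back) and that stopper.step returns True once patience is\r\n# exhausted; both behaviors live in utils_NEDA rather than in this script.\r\n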
\r\nif __name__ == '__main__':\r\n    number_experiments = 10\r\n    experiment_average(number_experiments)\r\n","repo_name":"xueyanfeng/NEDA","sub_path":"train_NEDA.py","file_name":"train_NEDA.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"74729818961","text":"from flask import Flask, request, redirect, url_for, render_template\nfrom jinja2 import Template, Environment, FileSystemLoader\n\n\napp = Flask(__name__)\n\nfile = FileSystemLoader('templates')\nenvironment = Environment(loader=file)\n\ndef vowel_count(text):\n    '''Count the vowels in text'''\n    vocales = 0\n    count = \"AaEeIiOoUu\"\n    for vow in text:\n        if vow in count:\n            vocales = vocales + 1\n    return vocales\n\ndef consonant_count(text):\n    '''Count the characters in text that are not vowels (this includes\n    spaces, digits and punctuation, not just consonants)'''\n    consonante = 0\n    count = \"AaEeIiOoUu\"\n    for con in text:\n        if con not in count:\n            consonante = consonante + 1\n    return consonante\n\ndef UpDown(text):\n    '''Alternate the case of text, starting with lowercase'''\n    palabra = \"\"\n    contador = 1\n    for char in text:\n        if contador - 1 == 0:\n            palabra += char.lower()\n            contador = contador + 1\n        else:\n            palabra += char.upper()\n            contador = contador - 1\n    return palabra\n\ndef naive(text):\n    '''Replace lowercase vowels with look-alike symbols and digits'''\n    string = \"\"\n    string = text.replace(\"a\",\"@\").replace(\"e\",\"3\").replace(\"i\",\"!\").replace(\"o\",\"0\").replace(\"u\",\")\")\n    return string\n\n\ndef cambios(text):\n    cambio = {}\n    if text == \"\":\n        return cambio\n\n    cambio[\"Reverse\"] = text[::-1]\n    cambio[\"Length\"] = len(text)\n    cambio[\"Vowels\"] = vowel_count(text)\n    cambio[\"Consonants\"] = consonant_count(text)\n    cambio[\"Upper\"] = text.upper()\n    cambio[\"Lower\"] = text.lower()\n    cambio[\"UpDown\"] = UpDown(text)\n    cambio[\"Naive\"] = naive(text)\n    return cambio
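\n\n# For instance (illustrative only): cambios(\"Hola\") yields Reverse='aloH',\n# Length=4, Vowels=2, Consonants=2, Upper='HOLA', Lower='hola',\n# UpDown='hOlA' and Naive='H0l@' (only lowercase vowels are replaced).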
\n\n@app.route('/', methods=['POST', 'GET'])\ndef form_post():\n    palabra_ingresada = request.args.get(\"text\", \"\")\n    cambio = cambios(palabra_ingresada)\n    return render_template(\"myform.html\", cambio = cambio, palabra_ingresada = palabra_ingresada)\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\",debug=True)","repo_name":"Tuki1077/Terminal-parcial-","sub_path":"TareaParcial/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"7489251250","text":"import os\nimport sys\nimport torch\nsys.path.insert(0, \"../../src\")\nimport config\nimport numpy as np\nimport torch.nn as nn\nfrom models import CNNModel\nfrom normalizations import *\nfrom datasets import MVTSDataset\nfrom torch.utils.data import DataLoader\nfrom sklearn.model_selection import train_test_split\n\n'''\nThis script is used to train a CNN model over 50 random train/val splits of the dataset.\n'''\n\n# Train and validation loops for the CNN model\n\ndef train():\n    model.train()\n    train_loss = 0\n    train_correct = 0\n    total_samples = 0  # Variable to keep track of total samples\n    for i, (x, mask, y) in enumerate(train_dataloader):\n        x = config.ACTIVE_NORM(x)  #? Secondary layer of normalization\n        x = torch.nan_to_num(x).to(device).unsqueeze(1)  # Add a channel dimension\n        y = y.to(device).long()  # Convert the target tensor to long\n        optimizer.zero_grad()\n        probabilities = model(x)\n        loss = criterion(probabilities, y)\n        loss.backward()\n        optimizer.step()\n        train_loss += loss.item()\n        train_correct += (probabilities.argmax(dim=-1) == y).sum().item()\n        total_samples += x.size(0)  # Increment the total samples by batch size\n    return train_loss / len(train_dataloader), train_correct / total_samples  # Divide by total_samples\n\ndef val():\n    model.eval()\n    val_loss = 0\n    val_correct = 0\n    total_samples = 0  # Variable to keep track of total samples\n    for i, (x, mask, y) in enumerate(val_dataloader):\n        x = config.ACTIVE_NORM(x)  #? Secondary layer of normalization\n        x = torch.nan_to_num(x).to(device).unsqueeze(1)  # Add a channel dimension\n        y = y.to(device).long()  # Convert the target tensor to long\n        probabilities = model(x)\n        loss = criterion(probabilities, y)\n        val_loss += loss.item()\n        val_correct += (probabilities.argmax(dim=-1) == y).sum().item()\n        total_samples += x.size(0)  # Increment the total samples by batch size\n    return val_loss / len(val_dataloader), val_correct / total_samples  # Divide by total_samples\n\n\nif __name__ == '__main__':\n\n    # Collect fold indices for all 50 models (folds 27 and 37 are excluded\n    # below)\n    path_to_splits = '../../kfold_results/splits/'\n    file_names = np.arange(0, 50, 1)\n    file_names = np.delete(file_names, np.argwhere(file_names == 27))\n    file_names = np.delete(file_names, np.argwhere(file_names == 37))\n\n    for file_name in file_names:\n\n        # Load split indices\n        path = path_to_splits + 'fold_' + str(file_name) + '.npz'\n        fhand = np.load(path)\n        val_indices = fhand['val_indices']\n        train_indices = fhand['train_indices']\n\n        # Create dataloaders\n        val_dataloader = DataLoader(MVTSDataset(val_indices, norm_type=config.BASE_NORM), batch_size=16, shuffle=True, drop_last=True)\n        train_dataloader = DataLoader(MVTSDataset(train_indices, norm_type=config.BASE_NORM), batch_size=16, shuffle=True, drop_last=True)\n\n        # Define model, optimizer, and loss function\n        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        criterion = nn.CrossEntropyLoss()\n        model = CNNModel().to(device)\n        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n        # Train and validate the model\n        best_loss = np.inf\n        best_val_acc = 0\n        best_val_loss = float('inf')\n        best_model_state_dict = None\n\n        for epoch in range(config.N_EPOCHS):\n            train_loss, train_acc = train()\n            val_loss, val_acc = val()\n\n            # Save the best model based on validation loss\n            if val_loss < best_val_loss:\n                best_val_loss = val_loss\n                best_model_state_dict = model.state_dict()\n                best_val_acc = val_acc\n\n            print(f'Epoch {epoch + 1} | Train Loss: {train_loss:.5f} | Train Acc: {train_acc * 100:.2f}% | Val Loss: {val_loss:.5f} | Val Acc: {val_acc * 100:.2f}%')\n\n\n        # Save the best model to a file\n        base_save_dir = '../../models/' + config.RUN_NAME + '/'\n        if not os.path.exists(base_save_dir):\n            os.mkdir(base_save_dir)\n        torch.save(best_model_state_dict, f'{base_save_dir}{file_name}.pth')\n\n        # Clean up memory\n        del model, optimizer, criterion, train_dataloader, val_dataloader, best_model_state_dict","repo_name":"brandonlpanos/mvts","sub_path":"src/kfold/kfold_cnn.py","file_name":"kfold_cnn.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"22649822160","text":"import numpy as np\n
import os\nimport mat73\nimport scipy.io\nimport matplotlib.pyplot as plt\nfrom typing import Union, List, Tuple\n\ndef runRecon(basePath: str,\n             mainSavePath: str,\n             coilInfo: str,\n             setName: str,\n             fileType: str,\n             AFtype: List[str],\n             AFname: str,\n             type: int = 0,\n             reconType: int = 0,\n             imgShow: int = 0\n             )->np.ndarray:\n    \"\"\"\n    Python runRecon convert from matlab in CMRxRecon\n\n    %% parameter meaning\n    % type = 0 means full kspace data\n    % type = 1 means subsampled data\n\n    % reconType = 0: perform zero-filling recon\n    % reconType = 1: perform GRAPPA recon\n    % reconType = 2: perform SENSE recon\n    % reconType = 3: perform both GRAPPA and SENSE recon\n\n    % imgShow = 0: ignore image imshow\n    % imgShow = 1: image imshow\n\n    % filetype: 'cine_lax', 'cine_sax', 'T1map', 'T2map'\n\n    \"\"\"\n    #set name\n    if fileType in ['cine_lax','cine_sax']:\n        modalityName = 'Cine'\n    else:\n        modalityName = 'Mapping'\n\n    #run for different Acc factors\n    for ind0 in range(3):\n        mainDataPath = basePath + coilInfo + modalityName + setName + AFtype[ind0]\n        savePath = mainSavePath + coilInfo + modalityName + setName + AFtype[ind0]\n        fileList = os.listdir(mainDataPath)  # the Python equivalent of MATLAB's dir()\n        # running all patients\n        for fileName in fileList:\n            dataPath = os.path.join(mainDataPath,os.path.join(fileName, fileType + '.mat'))\n            data = mat73.loadmat(dataPath)\n            variable_name = list(data.keys())[0]\n            kspace = data[variable_name]\n            # to reduce the computing burden and space, we only evaluate the central 2 slices\n            # For cine: use the first 3 time frames for ranking!\n            # For mapping: we need all weighting for ranking!\n            sx,sy,_,sz,t = kspace.shape\n            if fileType in ['cine_lax','cine_sax']:\n                reconImg = ChallengeRecon(kspace[:,:,:,sz//2-1:sz//2+1], type, reconType, imgShow)  # central 2 slices\n                img4ranking = crop(np.abs(reconImg),[np.round(sx/3),np.round(sy/2),2,3]).astype(np.float32)\n            else:\n                reconImg = ChallengeRecon(kspace[:,:,:,sz//2-1:sz//2+1], type, reconType, imgShow)  # central 2 slices\n                img4ranking = crop(np.abs(reconImg),[np.round(sx/3),np.round(sy/2),2,t]).astype(np.float32)\n\n            #mkdir for saving\n            saveFilePath = os.path.join(savePath,fileName)\n            if not os.path.exists(saveFilePath):\n                os.makedirs(saveFilePath)\n            assert os.path.isdir(saveFilePath)\n            mdict = {\"img4ranking\":img4ranking}\n            scipy.io.savemat(os.path.join(saveFilePath,fileType+'.mat'),mdict)\n\n        print(str(AFtype[ind0]) + ' reconstructed successfully!')\n
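\n# A hypothetical call of runRecon (every path and folder name below is an\n# illustrative assumption, not a value shipped with this code):\n# runRecon('/data/CMR/', '/data/CMR_recon/', 'MultiCoil/', '/TrainingSet/',\n#          'cine_sax', ['/AccFactor04/', '/AccFactor08/', '/AccFactor10/'],\n#          'AccFactor', type=1, reconType=0)\n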
\ndef ChallengeRecon(kspace: np.ndarray,\n                   type: int,\n                   reconType: int,\n                   imgShow: int)->np.ndarray:\n    \"\"\"\n    % kspace: complex images with the dimensions (sx,sy,sc,sz,t/w)\n    % -sx: matrix size in x-axis\n    % -sy: matrix size in y-axis\n    % -sc: coil array number\n    % -sz: slice number (short axis view); slice group (long axis view)\n    % -t/w: time frame/weighting\n\n    % type = 0 means full kspace data\n    % type = 1 means subsampled data\n\n    % reconType = 0: perform zero-filling recon\n    % reconType = 1: perform GRAPPA recon\n    % reconType = 2: perform SENSE recon\n    % reconType = 3: perform both GRAPPA and SENSE recon\n\n    % imgShow = 0: ignore image imshow\n    % imgShow = 1: image imshow\n    \"\"\"\n    if type == 0:\n        kspace_full = kspace\n        sx,sy,scc,sz,nPhase = kspace_full.shape\n        # complex dtype so the ifft results are not truncated to their real part\n        img_full = np.zeros((sx,sy,scc,sz,nPhase), dtype=complex)\n        img_full_sos = np.zeros((sx,sy,sz,nPhase))\n        for ind1 in range(sz):\n            for ind2 in range(nPhase):\n                img_full[:,:,:,ind1,ind2] = ifft2c(kspace_full[:,:,:,ind1,ind2])\n                img_full_sos[:,:,ind1,ind2] = sos(img_full[:,:,:,ind1,ind2])\n        if imgShow == 1:\n            plt.figure()\n            plt.imshow(np.abs(img_full_sos[:,:,0,0]),vmin=0.0,vmax=0.001)\n            plt.show(block=False)\n        recon = img_full_sos\n    else:\n        #load data\n        ncalib = 24\n        kspace_sub = kspace\n        sx,sy,scc,sz,nPhase = kspace.shape\n        kspace_cal = np.zeros((sx,ncalib,scc,sz,nPhase), dtype=complex)\n        img_zf = np.zeros((sx,sy,scc,sz,nPhase), dtype=complex)\n        img_sos = np.zeros((sx,sy,sz,nPhase))\n        #generate calibration data\n        for ind2 in range(nPhase):\n            # first time frame (index 1 in the original MATLAB is index 0 here)\n            kspace_calb = crop(kspace_sub[:,:,:,:,0],(sx,ncalib,scc,sz))\n            kspace_cal[:,:,:,:,ind2] = kspace_calb\n        #perform ZF recon\n        if reconType == 0:\n            for ind1 in range(sz):\n                for ind2 in range(nPhase):\n                    img_zf[:,:,:,ind1,ind2] = ifft2c(kspace_sub[:,:,:,ind1,ind2])\n                    img_sos[:,:,ind1,ind2] = sos(img_zf[:,:,:,ind1,ind2])\n                    print(str(ind1) + '/' + str(ind2) + ' completed!')\n            if imgShow == 1:\n                plt.figure()\n                plt.imshow(np.abs(img_sos[:,:,0,0]),vmin=0.0,vmax=0.001)\n                plt.show(block=False)\n            recon = img_sos\n        #perform GRAPPA recon\n        if reconType == 1 or reconType == 3:\n            img_grappa = np.zeros((sx,sy,scc,sz,nPhase), dtype=complex)\n            kspace_grappa = np.zeros((sx,sy,scc,sz,nPhase), dtype=complex)\n            img_grappa_sos = np.zeros((sx,sy,sz,nPhase))\n            for ind1 in range(sz):\n                for ind2 in range(nPhase):\n                    # myGRAPPA is assumed to be provided elsewhere; its\n                    # arguments were not carried over in this conversion\n                    kspace_grappa[:,:,:,ind1,ind2], img_grappa[:,:,:,ind1,ind2] = myGRAPPA()\n                    img_grappa_sos[:,:,ind1,ind2] = sos(img_grappa[:,:,:,ind1,ind2])\n                    print(str(ind1) + '/' + str(ind2) + ' completed!')\n            if imgShow == 1:\n                plt.figure()\n                plt.imshow(np.abs(img_grappa_sos[:,:,0,0]),vmin=0.0,vmax=0.001)\n                plt.show(block=False)\n            recon = img_grappa_sos\n        # perform SENSE recon\n        if reconType == 2:\n            img_sense = np.zeros((sx,sy,sz,nPhase), dtype=complex)\n            kspace_sense = np.zeros((sx,sy,scc,sz,nPhase), dtype=complex)\n            #perform sense recon\n            for ind1 in range(sz):\n                for ind2 in range(nPhase):\n                    # mySENSE is likewise assumed to be defined elsewhere;\n                    # img_sense is 4-D, so it is indexed without a coil axis\n                    kspace_sense[:,:,:,ind1,ind2], img_sense[:,:,ind1,ind2] = mySENSE()\n                    print(str(ind1) + '/' + str(ind2) + ' completed!')\n            if imgShow == 1:\n                plt.figure()\n                plt.imshow(np.abs(img_sense[:,:,0,0]),vmin=0.0,vmax=0.001)\n                plt.show(block=False)\n            recon = img_sense\n    return recon\n
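\n\n# crop and pad are used above and in the __main__ check below but are not\n# defined in this file; the sketches here are minimal centered-crop/zero-pad\n# implementations consistent with how they are called (the exact CMRxRecon\n# definitions may differ):\ndef crop(arr, shape):\n    # keep the central region of arr with the requested shape\n    shape = [int(s) for s in shape]\n    idx = tuple(slice((dim - s) // 2, (dim - s) // 2 + s)\n                for dim, s in zip(arr.shape, shape))\n    return arr[idx]\n\ndef pad(arr, shape):\n    # zero-pad arr up to the requested shape, keeping the data centered\n    shape = [int(s) for s in shape]\n    out = np.zeros(shape, dtype=arr.dtype)\n    idx = tuple(slice((s - dim) // 2, (s - dim) // 2 + dim)\n                for dim, s in zip(arr.shape, shape))\n    out[idx] = arr\n    return out\n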
pass\n","repo_name":"nyu-wireless/mmwsdr","sub_path":"host/demos/basic/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"4458304516","text":"#!/usr/bin/env python3\n\nfrom math import log\n\n\ndef calculate(op1, op2, operator=\"+\"):\n \"\"\"\n Takes in two operands (op1, op2) and an operator (+, -, *, /)\n to perform a calculation. Exception for Zero Dision handled below.\n\n Returns answer.\n \"\"\"\n try:\n int_1 = int(op1)\n int_2 = int(op2)\n except ValueError:\n print(\"You must enter integers for param1 and param2.\")\n return None\n\n if operator == \"-\":\n answer = int_1 - int_2\n elif operator == \"log\":\n try:\n answer = log(int_1) / log(int_2)\n except ValueError:\n print(\"Cannot log 0\")\n answer = None\n elif operator == \"/\":\n try:\n answer = int_1 / int_2\n except ZeroDivisionError:\n print(\"You cannot divide by 0!\")\n answer = 0\n elif operator == \"*\":\n answer = int_1 * int_2\n elif operator == \"+\":\n answer = int_1 + int_2\n\n return answer\n\n\n# answ = calculate(34, 9, 'log')\n# print(answ)\n\n\ndef collatz(n):\n \"\"\"\n This will perform the Collatz Conjecture:\n\n for integer n > 1\n - if n is even, then n = n // 2\n - if n is odd, then n = n * 3 + 1\n\n This will continue to loop until number is equal to 1 then breaks.\n \"\"\"\n number = int(n)\n print(f\"Starting at: {number}\")\n if number > 1:\n while True:\n if number % 2 == 0:\n number = number // 2\n print((\"-\" * 12), number)\n if number == 1:\n break\n else:\n number = (number * 3) + 1\n print((\"-\" * 12), number)\n if number == 1:\n break\n else:\n print(\"Please enter a number that is greater than 1...\")\n\n\ncollatz(23)\n\n\ndef sumdigits(num):\n \"\"\"\n Takes int and sums up it's digits: 1235 = 1 + 2 + 3 + 5 = 11,\n 11 = 1 + 1 = 2\n\n This should repeat until the remaining answer is only a single digit.\n \"\"\"\n num_list = [int(n) for n in str(num)]\n ans = sum(num_list)\n\n if ans > 9:\n return sumdigits(ans)\n else:\n return ans\n\n\n# digits = 999999999999999999\n# print(f'The Sum of Sums for: {digits} is... \\n {sumdigits(digits)}')\n\n\ndef add_comas(n):\n \"\"\"\n Takes number as input (n) and should return a formatted string\n with commas in the correct location e.g. 
12345 = 12,345\n    \"\"\"\n    number_list = [str(number) for number in str(n)]\n\n    if len(number_list) > 3:\n        number_list.reverse()\n        index = 3\n        while index < len(number_list):\n            number_list.insert(index, \",\")\n            index += 4\n        number_list.reverse()\n        return \"\".join(number_list)\n    else:\n        print(\"no commas needed\")\n        return \"\".join(number_list)\n\n\n# with_comas = add_comas(123457890779)\n# print(with_comas)\n","repo_name":"leblanck/iea-cohort-08","sub_path":"CourseMaterial/05_python_for_devops/funcfuncfunc.py","file_name":"funcfuncfunc.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
{"seq_id":"6380607940","text":"import picamera\nimport cv2\nimport io\nimport numpy as np\nimport time\n\ndef captureImage(camera):\n    # saving the picture to an in-program stream rather than a file\n    stream = io.BytesIO()\n\n    # capture into stream\n    camera.capture(stream, format='jpeg', use_video_port=True)\n    # convert image into numpy array (np.fromstring is deprecated for\n    # binary data, so np.frombuffer is used instead)\n    data = np.frombuffer(stream.getvalue(), dtype=np.uint8)\n    # turn the array into a cv2 image\n    img = cv2.imdecode(data, 1)\n\n    return img\n\nwith picamera.PiCamera() as camera:\n\n    # allow the camera to warmup\n    time.sleep(0.1)\n\n    for xres in [320, 640, 1280]:\n\n        print(xres)\n\n        # set the resolution of the camera (integer division keeps the 4:3\n        # aspect ratio and the integer values picamera expects)\n        camera.resolution = (xres, xres * 240 // 320)\n\n        nowT = time.time()\n        for i in range(10):\n            image = captureImage(camera)\n\n        print(\"At X resolution\", xres, \"Captured 10 images in \", time.time()-nowT, \"secs\")\n\n        cv2.imwrite('testCameraFromVideo' + str(xres) + '.jpg',image)\n","repo_name":"robdobsn/RobotPlay2048","sub_path":"Tests/TestCameraCaptureSpeeds/TestCameraFromVideoPort.py","file_name":"TestCameraFromVideoPort.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"20468401720","text":"elements = [\n    {\n        \"atomic_no\": \"21\",\n        \"name\": \"Scandium\",\n        \"symbol\": \"Sc\",\n        \"block\": \"d\",\n        \"atomic_mass\": \"44.96\",\n        \"nn\": \"24\",\n        \"np\": \"21\",\n        \"ne\": \"21\",\n        \"period\": \"4\",\n        \"group\": \"3\",\n        \"phase\": \"solid\",\n        \"radioactive\": \"FALSE\",\n        \"natural\": \"TRUE\",\n        \"metal\": \"TRUE\",\n        \"non_metal\": \"FALSE\",\n        \"metalloid\": \"FALSE\",\n        \"type\": \"Transition Metal\",\n        \"atomic_radius\": \"2.1\",\n        \"electronegativity\": \"1.36\",\n        \"first_ionization\": \"6.5615\",\n        \"density\": \"2.99\",\n        \"melting_point\": \"1812.15\",\n        \"boiling_point\": \"3109\",\n        \"isotrops\": \"15\",\n        \"discoverer\": \"Nilson\",\n        \"year\": \"1878\",\n        \"specific_heat\": \"0.568\",\n        \"shells\": \"4\",\n        \"valence\": \"-1\"\n    },\n    {\n        \"atomic_no\": \"22\",\n        \"name\": \"Titanium\",\n        \"symbol\": \"Ti\",\n        \"block\": \"d\",\n        \"atomic_mass\": \"47.87\",\n        \"nn\": \"26\",\n        \"np\": \"22\",\n        \"ne\": \"22\",\n        \"period\": \"4\",\n        \"group\": \"4\",\n        \"phase\": \"solid\",\n        \"radioactive\": \"FALSE\",\n        \"natural\": \"TRUE\",\n        \"metal\": \"TRUE\",\n        \"non_metal\": \"FALSE\",\n        \"metalloid\": \"FALSE\",\n        \"type\": \"Transition Metal\",\n        \"atomic_radius\": \"2\",\n        \"electronegativity\": \"1.54\",\n        \"first_ionization\": \"6.8281\",\n        \"density\": \"4.54\",\n        \"melting_point\": \"1933.15\",\n        \"boiling_point\": \"3560\",\n        \"isotrops\": \"9\",\n        \"discoverer\": \"Gregor\",\n        \"year\": \"1791\",\n        \"specific_heat\": \"0.523\",\n        \"shells\": \"4\",\n        \"valence\": \"-1\"\n    },\n    {\n        \"atomic_no\": \"23\",\n        \"name\": \"Vanadium\",\n        \"symbol\": \"V\",\n        
\"block\": \"d\",\n \"atomic_mass\": \"50.94\",\n \"nn\": \"28\",\n \"np\": \"23\",\n \"ne\": \"23\",\n \"period\": \"4\",\n \"group\": \"5\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.9\",\n \"electronegativity\": \"1.63\",\n \"first_ionization\": \"6.7462\",\n \"density\": \"6.11\",\n \"melting_point\": \"2175.15\",\n \"boiling_point\": \"3680\",\n \"isotrops\": \"9\",\n \"discoverer\": \" del Rio\",\n \"year\": \"1801\",\n \"specific_heat\": \"0.489\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"24\",\n \"name\": \"Chromium\",\n \"symbol\": \"Cr\",\n \"block\": \"d\",\n \"atomic_mass\": \"52\",\n \"nn\": \"28\",\n \"np\": \"24\",\n \"ne\": \"24\",\n \"period\": \"4\",\n \"group\": \"6\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.9\",\n \"electronegativity\": \"1.66\",\n \"first_ionization\": \"6.7665\",\n \"density\": \"7.15\",\n \"melting_point\": \"2130.15\",\n \"boiling_point\": \"2944\",\n \"isotrops\": \"9\",\n \"discoverer\": \"Vauquelin\",\n \"year\": \"1797\",\n \"specific_heat\": \"0.449\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"25\",\n \"name\": \"Manganese\",\n \"symbol\": \"Mn\",\n \"block\": \"d\",\n \"atomic_mass\": \"54.94\",\n \"nn\": \"30\",\n \"np\": \"25\",\n \"ne\": \"25\",\n \"period\": \"4\",\n \"group\": \"7\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"1.55\",\n \"first_ionization\": \"7.434\",\n \"density\": \"7.44\",\n \"melting_point\": \"1519.15\",\n \"boiling_point\": \"2334\",\n \"isotrops\": \"11\",\n \"discoverer\": \"Gahn, Scheele\",\n \"year\": \"1774\",\n \"specific_heat\": \"0.479\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"26\",\n \"name\": \"Iron\",\n \"symbol\": \"Fe\",\n \"block\": \"d\",\n \"atomic_mass\": \"55.85\",\n \"nn\": \"30\",\n \"np\": \"26\",\n \"ne\": \"26\",\n \"period\": \"4\",\n \"group\": \"8\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.7\",\n \"electronegativity\": \"1.83\",\n \"first_ionization\": \"7.9024\",\n \"density\": \"7.87\",\n \"melting_point\": \"1808.15\",\n \"boiling_point\": \"3134\",\n \"isotrops\": \"10\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.449\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"27\",\n \"name\": \"Cobalt\",\n \"symbol\": \"Co\",\n \"block\": \"d\",\n \"atomic_mass\": \"58.93\",\n \"nn\": \"32\",\n \"np\": \"27\",\n \"ne\": \"27\",\n \"period\": \"4\",\n \"group\": \"9\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.7\",\n \"electronegativity\": \"1.88\",\n \"first_ionization\": \"7.881\",\n \"density\": \"8.86\",\n \"melting_point\": \"1768.15\",\n 
\"boiling_point\": \"3200\",\n \"isotrops\": \"14\",\n \"discoverer\": \"Brandt\",\n \"year\": \"1735\",\n \"specific_heat\": \"0.421\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"28\",\n \"name\": \"Nickel\",\n \"symbol\": \"Ni\",\n \"block\": \"d\",\n \"atomic_mass\": \"58.69\",\n \"nn\": \"31\",\n \"np\": \"28\",\n \"ne\": \"28\",\n \"period\": \"4\",\n \"group\": \"10\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.6\",\n \"electronegativity\": \"1.91\",\n \"first_ionization\": \"7.6398\",\n \"density\": \"8.91\",\n \"melting_point\": \"1726.15\",\n \"boiling_point\": \"3186\",\n \"isotrops\": \"11\",\n \"discoverer\": \"Cronstedt\",\n \"year\": \"1751\",\n \"specific_heat\": \"0.444\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"29\",\n \"name\": \"Copper\",\n \"symbol\": \"Cu\",\n \"block\": \"d\",\n \"atomic_mass\": \"63.55\",\n \"nn\": \"35\",\n \"np\": \"29\",\n \"ne\": \"29\",\n \"period\": \"4\",\n \"group\": \"11\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.6\",\n \"electronegativity\": \"1.9\",\n \"first_ionization\": \"7.7264\",\n \"density\": \"8.96\",\n \"melting_point\": \"1357.75\",\n \"boiling_point\": \"2835\",\n \"isotrops\": \"11\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.385\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"30\",\n \"name\": \"Zinc\",\n \"symbol\": \"Zn\",\n \"block\": \"d\",\n \"atomic_mass\": \"65.38\",\n \"nn\": \"35\",\n \"np\": \"30\",\n \"ne\": \"30\",\n \"period\": \"4\",\n \"group\": \"12\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.5\",\n \"electronegativity\": \"1.65\",\n \"first_ionization\": \"9.3942\",\n \"density\": \"7.13\",\n \"melting_point\": \"692.88\",\n \"boiling_point\": \"1180\",\n \"isotrops\": \"15\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.388\",\n \"shells\": \"4\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"39\",\n \"name\": \"Yttrium\",\n \"symbol\": \"Y\",\n \"block\": \"d\",\n \"atomic_mass\": \"88.91\",\n \"nn\": \"50\",\n \"np\": \"39\",\n \"ne\": \"39\",\n \"period\": \"5\",\n \"group\": \"3\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2.3\",\n \"electronegativity\": \"1.22\",\n \"first_ionization\": \"6.2173\",\n \"density\": \"4.47\",\n \"melting_point\": \"1799.15\",\n \"boiling_point\": \"3609\",\n \"isotrops\": \"21\",\n \"discoverer\": \"Gadolin\",\n \"year\": \"1794\",\n \"specific_heat\": \"0.298\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"40\",\n \"name\": \"Zirconium\",\n \"symbol\": \"Zr\",\n \"block\": \"d\",\n \"atomic_mass\": \"91.22\",\n \"nn\": \"51\",\n \"np\": \"40\",\n \"ne\": \"40\",\n \"period\": \"5\",\n \"group\": \"4\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n 
\"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2.2\",\n \"electronegativity\": \"1.33\",\n \"first_ionization\": \"6.6339\",\n \"density\": \"6.51\",\n \"melting_point\": \"2125.15\",\n \"boiling_point\": \"4682\",\n \"isotrops\": \"20\",\n \"discoverer\": \"Klaproth\",\n \"year\": \"1789\",\n \"specific_heat\": \"0.278\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"41\",\n \"name\": \"Niobium\",\n \"symbol\": \"Nb\",\n \"block\": \"d\",\n \"atomic_mass\": \"92.91\",\n \"nn\": \"52\",\n \"np\": \"41\",\n \"ne\": \"41\",\n \"period\": \"5\",\n \"group\": \"5\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2.1\",\n \"electronegativity\": \"1.6\",\n \"first_ionization\": \"6.7589\",\n \"density\": \"8.57\",\n \"melting_point\": \"2741.15\",\n \"boiling_point\": \"5017\",\n \"isotrops\": \"24\",\n \"discoverer\": \"Hatchett\",\n \"year\": \"1801\",\n \"specific_heat\": \"0.265\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"42\",\n \"name\": \"Molybdenum\",\n \"symbol\": \"Mo\",\n \"block\": \"d\",\n \"atomic_mass\": \"95.96\",\n \"nn\": \"54\",\n \"np\": \"42\",\n \"ne\": \"42\",\n \"period\": \"5\",\n \"group\": \"6\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2\",\n \"electronegativity\": \"2.16\",\n \"first_ionization\": \"7.0924\",\n \"density\": \"10.2\",\n \"melting_point\": \"2890.15\",\n \"boiling_point\": \"4912\",\n \"isotrops\": \"20\",\n \"discoverer\": \"Scheele\",\n \"year\": \"1778\",\n \"specific_heat\": \"0.251\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"43\",\n \"name\": \"Technetium\",\n \"symbol\": \"Tc\",\n \"block\": \"d\",\n \"atomic_mass\": \"98\",\n \"nn\": \"55\",\n \"np\": \"43\",\n \"ne\": \"43\",\n \"period\": \"5\",\n \"group\": \"7\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2\",\n \"electronegativity\": \"1.9\",\n \"first_ionization\": \"7.28\",\n \"density\": \"11.5\",\n \"melting_point\": \"2473.15\",\n \"boiling_point\": \"5150\",\n \"isotrops\": \"23\",\n \"discoverer\": \"Perrier and Segr?\",\n \"year\": \"1937\",\n \"specific_heat\": \"-1\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"44\",\n \"name\": \"Ruthenium\",\n \"symbol\": \"Ru\",\n \"block\": \"d\",\n \"atomic_mass\": \"101.07\",\n \"nn\": \"57\",\n \"np\": \"44\",\n \"ne\": \"44\",\n \"period\": \"5\",\n \"group\": \"8\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.9\",\n \"electronegativity\": \"2.2\",\n \"first_ionization\": \"7.3605\",\n \"density\": \"12.4\",\n \"melting_point\": \"2523.15\",\n \"boiling_point\": \"4423\",\n \"isotrops\": \"16\",\n \"discoverer\": \"Klaus\",\n \"year\": \"1844\",\n \"specific_heat\": \"0.238\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"45\",\n \"name\": \"Rhodium\",\n 
\"symbol\": \"Rh\",\n \"block\": \"d\",\n \"atomic_mass\": \"102.91\",\n \"nn\": \"58\",\n \"np\": \"45\",\n \"ne\": \"45\",\n \"period\": \"5\",\n \"group\": \"9\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"2.28\",\n \"first_ionization\": \"7.4589\",\n \"density\": \"12.4\",\n \"melting_point\": \"2239.15\",\n \"boiling_point\": \"3968\",\n \"isotrops\": \"20\",\n \"discoverer\": \"Wollaston\",\n \"year\": \"1803\",\n \"specific_heat\": \"0.243\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"46\",\n \"name\": \"Palladium\",\n \"symbol\": \"Pd\",\n \"block\": \"d\",\n \"atomic_mass\": \"106.42\",\n \"nn\": \"60\",\n \"np\": \"46\",\n \"ne\": \"46\",\n \"period\": \"5\",\n \"group\": \"10\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"2.2\",\n \"first_ionization\": \"8.3369\",\n \"density\": \"12\",\n \"melting_point\": \"1825.15\",\n \"boiling_point\": \"3236\",\n \"isotrops\": \"21\",\n \"discoverer\": \"Wollaston\",\n \"year\": \"1803\",\n \"specific_heat\": \"0.244\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"47\",\n \"name\": \"Silver\",\n \"symbol\": \"Ag\",\n \"block\": \"d\",\n \"atomic_mass\": \"107.87\",\n \"nn\": \"61\",\n \"np\": \"47\",\n \"ne\": \"47\",\n \"period\": \"5\",\n \"group\": \"11\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"1.93\",\n \"first_ionization\": \"7.5762\",\n \"density\": \"10.5\",\n \"melting_point\": \"1234.15\",\n \"boiling_point\": \"2435\",\n \"isotrops\": \"27\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.235\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"48\",\n \"name\": \"Cadmium\",\n \"symbol\": \"Cd\",\n \"block\": \"d\",\n \"atomic_mass\": \"112.41\",\n \"nn\": \"64\",\n \"np\": \"48\",\n \"ne\": \"48\",\n \"period\": \"5\",\n \"group\": \"12\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.7\",\n \"electronegativity\": \"1.69\",\n \"first_ionization\": \"8.9938\",\n \"density\": \"8.69\",\n \"melting_point\": \"594.33\",\n \"boiling_point\": \"1040\",\n \"isotrops\": \"22\",\n \"discoverer\": \"Stromeyer\",\n \"year\": \"1817\",\n \"specific_heat\": \"0.232\",\n \"shells\": \"5\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"57\",\n \"name\": \"Lanthanum\",\n \"symbol\": \"La\",\n \"block\": \"d\",\n \"atomic_mass\": \"138.91\",\n \"nn\": \"82\",\n \"np\": \"57\",\n \"ne\": \"57\",\n \"period\": \"6\",\n \"group\": \"-1\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Lanthanide\",\n \"atomic_radius\": \"2.7\",\n \"electronegativity\": \"1.1\",\n \"first_ionization\": \"5.5769\",\n \"density\": \"6.15\",\n 
\"melting_point\": \"1193.15\",\n \"boiling_point\": \"3737\",\n \"isotrops\": \"19\",\n \"discoverer\": \"Mosander\",\n \"year\": \"1839\",\n \"specific_heat\": \"0.195\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"72\",\n \"name\": \"Hafnium\",\n \"symbol\": \"Hf\",\n \"block\": \"d\",\n \"atomic_mass\": \"178.49\",\n \"nn\": \"106\",\n \"np\": \"72\",\n \"ne\": \"72\",\n \"period\": \"6\",\n \"group\": \"4\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2.2\",\n \"electronegativity\": \"1.3\",\n \"first_ionization\": \"6.8251\",\n \"density\": \"13.3\",\n \"melting_point\": \"2500.15\",\n \"boiling_point\": \"4876\",\n \"isotrops\": \"17\",\n \"discoverer\": \"Coster and von Hevesy\",\n \"year\": \"1923\",\n \"specific_heat\": \"0.144\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"73\",\n \"name\": \"Tantalum\",\n \"symbol\": \"Ta\",\n \"block\": \"d\",\n \"atomic_mass\": \"180.95\",\n \"nn\": \"108\",\n \"np\": \"73\",\n \"ne\": \"73\",\n \"period\": \"6\",\n \"group\": \"5\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2.1\",\n \"electronegativity\": \"1.5\",\n \"first_ionization\": \"7.5496\",\n \"density\": \"16.7\",\n \"melting_point\": \"3269.15\",\n \"boiling_point\": \"5731\",\n \"isotrops\": \"19\",\n \"discoverer\": \"Ekeberg\",\n \"year\": \"1801\",\n \"specific_heat\": \"0.14\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"74\",\n \"name\": \"Tungsten\",\n \"symbol\": \"W\",\n \"block\": \"d\",\n \"atomic_mass\": \"183.84\",\n \"nn\": \"110\",\n \"np\": \"74\",\n \"ne\": \"74\",\n \"period\": \"6\",\n \"group\": \"6\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2\",\n \"electronegativity\": \"2.36\",\n \"first_ionization\": \"7.864\",\n \"density\": \"19.3\",\n \"melting_point\": \"3680.15\",\n \"boiling_point\": \"5828\",\n \"isotrops\": \"22\",\n \"discoverer\": \"J. and F. 
d'Elhuyar\",\n \"year\": \"1783\",\n \"specific_heat\": \"0.132\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"75\",\n \"name\": \"Rhenium\",\n \"symbol\": \"Re\",\n \"block\": \"d\",\n \"atomic_mass\": \"186.21\",\n \"nn\": \"111\",\n \"np\": \"75\",\n \"ne\": \"75\",\n \"period\": \"6\",\n \"group\": \"7\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"2\",\n \"electronegativity\": \"1.9\",\n \"first_ionization\": \"7.8335\",\n \"density\": \"21\",\n \"melting_point\": \"3453.15\",\n \"boiling_point\": \"5869\",\n \"isotrops\": \"21\",\n \"discoverer\": \"Noddack, Berg, and Tacke\",\n \"year\": \"1925\",\n \"specific_heat\": \"0.137\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"76\",\n \"name\": \"Osmium\",\n \"symbol\": \"Os\",\n \"block\": \"d\",\n \"atomic_mass\": \"190.23\",\n \"nn\": \"114\",\n \"np\": \"76\",\n \"ne\": \"76\",\n \"period\": \"6\",\n \"group\": \"8\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.9\",\n \"electronegativity\": \"2.2\",\n \"first_ionization\": \"8.4382\",\n \"density\": \"22.6\",\n \"melting_point\": \"3300.15\",\n \"boiling_point\": \"5285\",\n \"isotrops\": \"19\",\n \"discoverer\": \"Tennant\",\n \"year\": \"1803\",\n \"specific_heat\": \"0.13\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"77\",\n \"name\": \"Iridium\",\n \"symbol\": \"Ir\",\n \"block\": \"d\",\n \"atomic_mass\": \"192.22\",\n \"nn\": \"115\",\n \"np\": \"77\",\n \"ne\": \"77\",\n \"period\": \"6\",\n \"group\": \"9\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.9\",\n \"electronegativity\": \"2.2\",\n \"first_ionization\": \"8.967\",\n \"density\": \"22.6\",\n \"melting_point\": \"2716.15\",\n \"boiling_point\": \"4701\",\n \"isotrops\": \"25\",\n \"discoverer\": \"Tennant\",\n \"year\": \"1804\",\n \"specific_heat\": \"0.131\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"78\",\n \"name\": \"Platinum\",\n \"symbol\": \"Pt\",\n \"block\": \"d\",\n \"atomic_mass\": \"195.08\",\n \"nn\": \"117\",\n \"np\": \"78\",\n \"ne\": \"78\",\n \"period\": \"6\",\n \"group\": \"10\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"2.28\",\n \"first_ionization\": \"8.9587\",\n \"density\": \"21.5\",\n \"melting_point\": \"2045.15\",\n \"boiling_point\": \"4098\",\n \"isotrops\": \"32\",\n \"discoverer\": \"Ulloa/Wood\",\n \"year\": \"1735\",\n \"specific_heat\": \"0.133\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"79\",\n \"name\": \"Gold\",\n \"symbol\": \"Au\",\n \"block\": \"d\",\n \"atomic_mass\": \"196.97\",\n \"nn\": \"118\",\n \"np\": \"79\",\n \"ne\": \"79\",\n \"period\": \"6\",\n \"group\": \"11\",\n \"phase\": \"solid\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": 
\"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"2.54\",\n \"first_ionization\": \"9.2255\",\n \"density\": \"19.3\",\n \"melting_point\": \"1337.73\",\n \"boiling_point\": \"3129\",\n \"isotrops\": \"21\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.129\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"80\",\n \"name\": \"Mercury\",\n \"symbol\": \"Hg\",\n \"block\": \"d\",\n \"atomic_mass\": \"200.59\",\n \"nn\": \"121\",\n \"np\": \"80\",\n \"ne\": \"80\",\n \"period\": \"6\",\n \"group\": \"12\",\n \"phase\": \"liq\",\n \"radioactive\": \"FALSE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"1.8\",\n \"electronegativity\": \"2\",\n \"first_ionization\": \"10.4375\",\n \"density\": \"13.5\",\n \"melting_point\": \"234.43\",\n \"boiling_point\": \"630\",\n \"isotrops\": \"26\",\n \"discoverer\": \"Prehistoric\",\n \"year\": \"0\",\n \"specific_heat\": \"0.14\",\n \"shells\": \"6\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"89\",\n \"name\": \"Actinium\",\n \"symbol\": \"Ac\",\n \"block\": \"d\",\n \"atomic_mass\": \"227\",\n \"nn\": \"138\",\n \"np\": \"89\",\n \"ne\": \"89\",\n \"period\": \"7\",\n \"group\": \"0\",\n \"phase\": \"solid\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"TRUE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Actinide\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"1.1\",\n \"first_ionization\": \"5.17\",\n \"density\": \"10.1\",\n \"melting_point\": \"1323.15\",\n \"boiling_point\": \"3471\",\n \"isotrops\": \"11\",\n \"discoverer\": \"Debierne/Giesel\",\n \"year\": \"1899\",\n \"specific_heat\": \"0.12\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"104\",\n \"name\": \"Rutherfordium\",\n \"symbol\": \"Rf\",\n \"block\": \"d\",\n \"atomic_mass\": \"261\",\n \"nn\": \"157\",\n \"np\": \"104\",\n \"ne\": \"104\",\n \"period\": \"7\",\n \"group\": \"4\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"18.1\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Ghiorso et al.\",\n \"year\": \"1969\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"105\",\n \"name\": \"Dubnium\",\n \"symbol\": \"Db\",\n \"block\": \"d\",\n \"atomic_mass\": \"262\",\n \"nn\": \"157\",\n \"np\": \"105\",\n \"ne\": \"105\",\n \"period\": \"7\",\n \"group\": \"5\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"39\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Ghiorso et al.\",\n \"year\": \"1970\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"106\",\n \"name\": \"Seaborgium\",\n \"symbol\": \"Sg\",\n \"block\": \"d\",\n \"atomic_mass\": \"266\",\n 
\"nn\": \"160\",\n \"np\": \"106\",\n \"ne\": \"106\",\n \"period\": \"7\",\n \"group\": \"6\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"35\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Ghiorso et al.\",\n \"year\": \"1974\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"107\",\n \"name\": \"Bohrium\",\n \"symbol\": \"Bh\",\n \"block\": \"d\",\n \"atomic_mass\": \"264\",\n \"nn\": \"157\",\n \"np\": \"107\",\n \"ne\": \"107\",\n \"period\": \"7\",\n \"group\": \"7\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"37\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Armbruster and M?nzenberg\",\n \"year\": \"1981\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"108\",\n \"name\": \"Hassium\",\n \"symbol\": \"Hs\",\n \"block\": \"d\",\n \"atomic_mass\": \"267\",\n \"nn\": \"159\",\n \"np\": \"108\",\n \"ne\": \"108\",\n \"period\": \"7\",\n \"group\": \"8\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"41\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Armbruster and M?nzenberg\",\n \"year\": \"1983\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"109\",\n \"name\": \"Meitnerium\",\n \"symbol\": \"Mt\",\n \"block\": \"d\",\n \"atomic_mass\": \"268\",\n \"nn\": \"159\",\n \"np\": \"109\",\n \"ne\": \"109\",\n \"period\": \"7\",\n \"group\": \"9\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"35\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"GSI, Darmstadt, West Germany\",\n \"year\": \"1982\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"110\",\n \"name\": \"Darmstadtium \",\n \"symbol\": \"Ds \",\n \"block\": \"d\",\n \"atomic_mass\": \"271\",\n \"nn\": \"161\",\n \"np\": \"110\",\n \"ne\": \"110\",\n \"period\": \"7\",\n \"group\": \"10\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"-1\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": 
\"-1\",\n \"discoverer\": \"Not_defined\",\n \"year\": \"1994\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"111\",\n \"name\": \"Roentgenium \",\n \"symbol\": \"Rg \",\n \"block\": \"d\",\n \"atomic_mass\": \"272\",\n \"nn\": \"161\",\n \"np\": \"111\",\n \"ne\": \"111\",\n \"period\": \"7\",\n \"group\": \"11\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"-1\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Not_defined\",\n \"year\": \"1994\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n },\n {\n \"atomic_no\": \"112\",\n \"name\": \"Copernicium \",\n \"symbol\": \"Cn \",\n \"block\": \"d\",\n \"atomic_mass\": \"285\",\n \"nn\": \"173\",\n \"np\": \"112\",\n \"ne\": \"112\",\n \"period\": \"7\",\n \"group\": \"12\",\n \"phase\": \"artificial\",\n \"radioactive\": \"TRUE\",\n \"natural\": \"FALSE\",\n \"metal\": \"TRUE\",\n \"non_metal\": \"FALSE\",\n \"metalloid\": \"FALSE\",\n \"type\": \"Transition Metal\",\n \"atomic_radius\": \"0\",\n \"electronegativity\": \"-1\",\n \"first_ionization\": \"-1\",\n \"density\": \"-1\",\n \"melting_point\": \"-1\",\n \"boiling_point\": \"-1\",\n \"isotrops\": \"-1\",\n \"discoverer\": \"Not_defined\",\n \"year\": \"1996\",\n \"specific_heat\": \"-1\",\n \"shells\": \"7\",\n \"valence\": \"-1\"\n }\n]\n","repo_name":"niteshsinha17/PT","sub_path":"backend/PeriodicTable/Element/management/commands/elements_data/d_block.py","file_name":"d_block.py","file_ext":"py","file_size_in_byte":33000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6984074636","text":"import csv\n\n\nclass CSVReader:\n\n def __init__(self, file_object, header, columns_mapper, delimiter=None):\n file_object.seek(0)\n self._header = header\n self._columns_mapper = columns_mapper\n self._rows = csv.DictReader(file_object, delimiter=delimiter or '|')\n\n def _validate_header(self):\n if not self._rows.fieldnames == self._header:\n raise csv.Error('Invalid Header: expected {}, got {}'.format(\n self._header,\n self._rows.fieldnames\n ))\n\n def get_data(self):\n self._validate_header()\n return self._columns_mapper(self._rows)\n","repo_name":"michel-rodrigues/viggio_backend","sub_path":"app/utils/csv_reader.py","file_name":"csv_reader.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38778763662","text":"\"\"\"Statistics handler.\"\"\"\nfrom datetime import date\n\nfrom sqlalchemy.orm.scoping import scoped_session\n\nfrom pollbot.models.user import User\n\n\ndef increase_stat(session: scoped_session, name: str) -> None:\n \"\"\"Increase a specific statistic.\"\"\"\n from pollbot.models import DailyStatistic\n\n mapping = {\n \"votes\": DailyStatistic.votes,\n \"callback_calls\": DailyStatistic.callback_calls,\n \"new_users\": DailyStatistic.new_users,\n \"created_polls\": DailyStatistic.created_polls,\n \"externally_shared\": DailyStatistic.externally_shared,\n \"show_results\": DailyStatistic.show_results,\n \"notifications\": DailyStatistic.notifications,\n }\n\n column = mapping[name]\n 
session.query(DailyStatistic).filter(DailyStatistic.date == date.today()).update(\n        {name: column + 1}\n    )\n\n\ndef increase_user_stat(session: scoped_session, user: User, name: str) -> None:\n    \"\"\"Increase a specific statistic.\"\"\"\n    from pollbot.models import UserStatistic\n\n    mapping = {\n        \"callback_calls\": UserStatistic.callback_calls,\n        \"votes\": UserStatistic.votes,\n        \"poll_callback_calls\": UserStatistic.poll_callback_calls,\n        \"created_polls\": UserStatistic.created_polls,\n        \"inline_shares\": UserStatistic.inline_shares,\n    }\n\n    column = mapping[name]\n    session.query(UserStatistic).filter(UserStatistic.user == user).filter(\n        UserStatistic.date == date.today()\n    ).update({name: column + 1})\n","repo_name":"Nukesor/ultimate-poll-bot","sub_path":"pollbot/helper/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"3"}
+{"seq_id":"18282499562","text":"'''\nAuthor: fghpdf\nDate: 2022-01-11 09:16:52\nLastEditTime: 2022-01-11 09:24:54\nLastEditors: fghpdf\n'''\nfrom typing import List\n\n\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution:\n    def balanceBST(self, root: TreeNode) -> TreeNode:\n        def inOrder(root: TreeNode):\n            if not root:\n                return []\n            sortedArray = []\n            sortedArray.extend(inOrder(root.left))\n            sortedArray.append(root)\n            sortedArray.extend(inOrder(root.right))\n\n            return sortedArray\n\n        def sortedArrayToBST(sortedArray: List[TreeNode], left: int, right: int) -> TreeNode:\n            if left > right:\n                return None\n\n            mid = (left + right) // 2\n            root = sortedArray[mid]\n            root.left = sortedArrayToBST(sortedArray, left, mid-1)\n            # mid is consumed as the root; passing mid again would recurse forever\n            root.right = sortedArrayToBST(sortedArray, mid+1, right)\n\n            return root\n\n        sortedArray = inOrder(root)\n        return sortedArrayToBST(sortedArray, 0, len(sortedArray) - 1)\n\n","repo_name":"fghpdf/leetcode","sub_path":"py/balance_a_binary_search_tree/balance_a_binary_search_tree.py","file_name":"balance_a_binary_search_tree.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
+{"seq_id":"32871386306","text":"__author__ = \"Adam Wild\"\n__copyright__ = \"Copyright 2016-2019 The Aramis Lab Team\"\n__credits__ = [\"Adam Wild\"]\n__license__ = \"See LICENSE.txt file\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Adam Wild\"\n__email__ = \"adam.wild@icm-institute.org\"\n__status__ = \"Development\"\n\n\nclass Parse_clinical():\n    '''Creates the clinical files for the BIDS directory'''\n\n    def __init__(self, path_clinical):\n        import pandas as pd\n        import os.path as path\n\n        self.df_dict_mod = pd.read_csv(path.join(path_clinical, 'clinical_info.tsv'), sep='\\t')\n        self.df_clinical = pd.read_excel(path.join(path_clinical, 'NIFD_Clinical_Data_2017_final_updated.xlsx'))\n        self.df_ida = pd.read_csv(path.join(path_clinical, 'ida.tsv'), sep='\\t')\n\n        self.merge_clinical = self.merge_clinical()\n        self.df_clinical = self.merge_clinical\n\n    def make_sessions_ida(self, pat_name):\n        '''\n        Preprocesses ida for the left join operated in merge_clinical_scans\n\n        Args:\n            pat_name: subject_ID\n        '''\n\n        def write_ses(num):\n            num = str(num)\n            num = '0' + num if len(num) == 1 else num\n            return 'ses-M' + num\n\n        bloc = self.df_ida[self.df_ida['Subject ID'] == pat_name]\n        bloc = bloc[['Visit', 'Study Date', 'Age', 'Weight', 'Research Group']]\n\n        bloc['Visit'] = bloc['Visit'].apply(lambda x: 
int(x.split(' ')[1]))\n bloc = bloc.sort_values(['Visit'], ascending=[True])\n bloc['Visit'] = bloc['Visit'].apply(lambda x: write_ses(x))\n bloc = bloc.groupby('Visit').first().reset_index()\n bloc.columns = ['session_id', 'examination_date', 'age', 'weight', 'research_group']\n\n return bloc\n\n def merge_clinical_scans(self, pat_name):\n \"\"\"\n Operates a left join between the ida and clinical table\n For a given patient, we have the usual ida file, extended with the information from the clinical table\n\n Args:\n pat_name: Name of the subject\n\n Returns:\n dfMerge: pandas dataframe corresponding to the left join\n \"\"\"\n import pandas as pd\n import warnings\n\n bloc = self.df_clinical[self.df_clinical['LONI_ID'] == pat_name]\n\n def parse_date(x):\n x = str(x).split(' ')[0]\n x = x.split('-')\n sol = x[1] + '/' + x[2] + '/' + x[0]\n return sol\n\n warnings.simplefilter('ignore')\n bloc['CLINICAL_LINKDATE'] = bloc['CLINICAL_LINKDATE'].apply(lambda x: parse_date(x))\n\n bloc_ida = self.make_sessions_ida(pat_name)\n bloc_ida['examination_date'] = bloc_ida['examination_date'].apply(\n lambda x: '0' + str(x).split(' ')[0] if str(x).split(' ')[0][0] != '0' else str(x).split(' ')[0])\n\n dfMerge = pd.merge(bloc_ida, bloc, left_on='examination_date', right_on='CLINICAL_LINKDATE')\n dfMerge = dfMerge.drop(columns=['CLINICAL_LINKDATE', 'LONI_ID'])\n\n return dfMerge\n\n def merge_clinical(self):\n \"\"\"\n Operates a left join between the clinical and ida table\n The table that we obtain is the usual clinical table with data, when examination_date and CLINICAL_LINKDATE are the same, from the ida table\n\n Returns:\n dfSol: pandas dataframe corresponding to the left join\n \"\"\"\n # Could be optimized\n dfSol = self.df_clinical\n\n dfSol.insert(4, 'Weight', '')\n dfSol.insert(4, 'Research Group', '')\n dfSol.insert(4, 'Age', '')\n\n def parse_date(x):\n x = str(x).split(' ')[0]\n x = x.split('-')\n sol = x[1] + '/' + x[2] + '/' + x[0]\n return sol\n\n curr_sub = None\n for index, row in dfSol.iterrows():\n if row['LONI_ID'] != curr_sub:\n curr_sub = row['LONI_ID']\n dfMerge = self.merge_clinical_scans(curr_sub)\n info = dfMerge[dfMerge['examination_date'] == parse_date(row['CLINICAL_LINKDATE'])]\n if not info.empty:\n dfSol.loc[index, 'Weight'] = float(info['weight'])\n dfSol.loc[index, 'Research Group'] = str(info.iloc[0]['research_group'])\n dfSol.loc[index, 'Age'] = int(info['age'])\n\n return dfSol\n\n def get_clinical_ida(self):\n \"\"\" Left join clinical data and ida, new version \"\"\"\n import pandas as pd\n import math\n\n df_clinical = self.df_clinical.copy()\n df_ida = self.df_ida.copy()\n df_ida['Session_number'] = df_ida['Visit'].apply(lambda x: int(x.split(' ')[-1]))\n\n df_ida = df_ida.groupby(['Subject ID', 'Study Date']).first().reset_index()\n df_ida = df_ida.sort_values(['Subject ID', 'Session_number']).reset_index()\n\n def parse_date(x):\n x = x.split('/')\n for i in range(len(x)):\n if len(x[i]) == 1:\n x[i] = '0' + x[i]\n sol = x[1] + '/' + x[0] + '/' + x[2]\n return sol\n\n def parse_date2(x):\n x = str(x).split(' ')[0]\n x = x.split('-')\n sol = x[2] + '/' + x[1] + '/' + x[0]\n return sol\n\n df_ida['Study Date'] = df_ida['Study Date'].apply(lambda x: parse_date(x))\n df_clinical['CLINICAL_LINKDATE'] = df_clinical['CLINICAL_LINKDATE'].apply(lambda x: parse_date2(x))\n\n dfSol = pd.merge(df_clinical, df_ida, how='left', left_on=['LONI_ID', 'CLINICAL_LINKDATE'],\n right_on=['Subject ID', 'Study Date'])\n dfSol.insert(0, 'session_id', '')\n\n dfSol['session_id'] = 
dfSol['Session_number'].apply(\n lambda x: '' if math.isnan(x) else ('ses-M' + str(int(x)) if x > 10 else 'ses-M0' + str(int(x))))\n\n return dfSol\n\n def make_sessions_type(self, pat_name, keep_all=False):\n \"\"\" Updated version of make_sessions\n\n Args:\n pat_name: subject_ID of a patient\n keep_all: if True, include clinical data not linked to a MRI, else include only clinical data linked to a MRI\n\n Returns:\n bloc: pandas dataframe corresponding to the \"sessions.tsv\" file\"\"\"\n\n name_clinical, name_BIDS = self.get_names('sessions')\n name_clinical.insert(0, 'session_id')\n name_BIDS.insert(0, 'session_id')\n name_clinical.extend(['Age_x', 'Research Group_x', 'Weight_x'])\n name_BIDS.extend(['age', 'research_group', 'weight'])\n\n df_clinical_ida = self.get_clinical_ida()\n if not keep_all:\n df_clinical_ida = df_clinical_ida[df_clinical_ida['session_id'] != '']\n\n bloc = df_clinical_ida[df_clinical_ida['LONI_ID'] == pat_name]\n bloc = bloc[name_clinical]\n bloc.columns = name_BIDS\n\n return bloc\n\n def get_names(self, file='sessions'):\n \"\"\"\n Takes the clinical and BIDS_CLINICA fields that will be included in the clinical file \"file\"\n Returns them in list form\n\n Args:\n file: Corresponds to the clinical file that is about to be created, file = \"sessions\" or \"participants\"\n\n Returns:\n name_clinical, name_BIDS: 2 lists of fields\n \"\"\"\n bloc = self.df_dict_mod[self.df_dict_mod['FILE'] == file]\n name_clinical = bloc['COLUMN_NAME'].tolist()\n name_BIDS = bloc['BIDS_CLINICA'].tolist()\n\n return name_clinical, name_BIDS\n\n def make_sessions(self, pat_name):\n '''\n Creates the sessions file for a given patient\n\n Args:\n pat_name: subject_ID of a patient\n\n Returns:\n bloc: pandas dataframe corresponding to the \"sessions.tsv\" file\n '''\n name_clinical, name_BIDS = self.get_names('sessions')\n\n bloc = self.df_clinical[self.df_clinical['LONI_ID'] == pat_name]\n bloc = bloc[name_clinical]\n bloc.columns = name_BIDS\n\n return bloc\n\n def make_participants(self, pat_list=None):\n '''\n Creates the participants file for all patients\n\n Args:\n pat_list: subject_ID of all patients found in the converted BIDS directory\n\n Returns:\n bloc: pandas dataframe corresponding to the \"participants.tsv\" file\n '''\n name_clinical, name_BIDS = self.get_names('participants')\n bloc = self.df_clinical.groupby('LONI_ID').first().reset_index()\n bloc = bloc[name_clinical]\n bloc['LONI_ID'] = bloc['LONI_ID'].apply(lambda x: 'sub-NIFD' + x.replace(\"_\", \"\"))\n bloc.columns = name_BIDS\n\n if pat_list is not None:\n bloc = bloc[bloc['participant_id'].isin(pat_list)]\n\n # Enforce the clinica_BIDS convention\n if 'sex' in list(bloc):\n bloc['sex'] = bloc['sex'].apply(lambda x: 'M' if x == 1 else 'F')\n\n return bloc\n\n def make_scans(self, path_scans):\n \"\"\"\n Creates the scans file for a patient's session\n\n Args:\n path_scans: path to a session for a patient\n\n Returns:\n bloc: pandas dataframe corresponding to the \"participants.tsv\" file\n \"\"\"\n import os\n\n s = 'filename\tscan_id\tmri_field\\n'\n subs = [f.path.split('/')[-1] for f in os.scandir(path_scans) if f.is_dir()]\n for sub in subs:\n name = os.listdir(os.path.join(path_scans, sub))\n name = [i for i in name if i != '.DS_Store' and (i.endswith('.nii.gz') or i.endswith('.nii'))]\n\n for n in name:\n s += sub + '/' + n + '\\n'\n return s\n\n def write(self, df, path, name):\n \"\"\"\n Saves a pandas dataframe\n\n Args:\n df: a pandas dataframe\n path: Path where the dataframe is to be saved\n 
name: name of the output file (/!\\ do not include the extension, '.tsv' is added in the function)\n \"\"\"\n import os\n\n name = os.path.join(path, name) + '.tsv'\n df.to_csv(sep='\\t', path_or_buf=name, index=False)\n\n def make_all(self, pathBIDS):\n \"\"\"\n Makes the participants.tsv and all sessions.tsv files for all subjects available in the BIDS directory\n\n Args:\n pathBIDS: path to the BIDS directory\n \"\"\"\n import os\n\n pat_list = os.listdir(pathBIDS)\n pat_list = [elt for elt in pat_list if elt.startswith('sub')]\n\n assert pat_list != [], 'BIDS directory is empty'\n\n self.write(self.make_participants(pat_list), pathBIDS, 'participants')\n\n for pat in pat_list:\n path_sessions = os.path.join(pathBIDS, pat)\n pat2 = pat[8] + '_S_' + pat[10:14]\n\n self.write(self.make_sessions_type(pat2), path_sessions, pat + '_sessions')\n\n def make_all_scans(self, to_convert):\n \"\"\"\n Makes the scans.tsv files for all subjects available in the BIDS directory\n\n Args:\n to_convert: List of tuples of paths (path_in, path_out), computed for the initial image conversion\n \"\"\"\n\n import os\n root = '/' + os.path.join(*to_convert[0][1].split('/')[:-4])\n\n def make_dic_tuples(to_convert, root):\n sol = {}\n dic_pat_sess = {}\n for tuple in to_convert:\n s_path1 = tuple[1].split('/')\n\n key = os.path.join(root, s_path1[-4], s_path1[-3])\n\n if s_path1[-4] not in dic_pat_sess:\n dic_pat_sess[s_path1[-4]] = [s_path1[-3]]\n elif s_path1[-3] not in dic_pat_sess[s_path1[-4]]:\n dic_pat_sess[s_path1[-4]].append(s_path1[-3])\n\n if key not in sol:\n sol[key] = [tuple]\n else:\n sol[key].append(tuple)\n\n return sol, dic_pat_sess\n\n def make_template():\n import pandas as pd\n\n new_cols = []\n\n for cell in list(self.df_ida['Imaging Protocol']):\n if isinstance(cell, type('a')):\n col_to_add = [i.split('=')[0] for i in cell.split(';')]\n for i in col_to_add:\n if i not in new_cols:\n new_cols.append(i)\n\n new_cols.extend(['Modality', 'Description', 'Type', 'Image ID'])\n new_cols.insert(0, 'filename')\n sol = pd.DataFrame(columns=new_cols)\n return sol\n\n def extend_line(df_line_ida, template):\n col_values = {}\n s = list(df_line_ida['Imaging Protocol'])[0]\n for coup in s.split(';'):\n col_values[coup.split('=')[0]] = coup.split('=')[1]\n sol = df_line_ida\n for col_name in col_values:\n sol.insert(0, col_name, col_values[col_name])\n for name in list(template):\n if name not in list(sol):\n sol.insert(0, name, '')\n return sol[list(template)]\n\n dic_tuples, dic_pat_sess = make_dic_tuples(to_convert, root)\n template = make_template()\n\n for sub_id in dic_pat_sess:\n for ses_num in dic_pat_sess[sub_id]:\n df_ses = template.copy()\n for tuple in dic_tuples[os.path.join(root, sub_id, ses_num)]:\n s_path0 = tuple[0].split('/')\n s_path1 = tuple[1].split('/')\n filename = os.path.join(s_path1[-2], s_path1[-1]) + '.nii.gz'\n df_line_ida = self.df_ida[\n (self.df_ida['Subject ID'] == s_path1[-1][8] + '_S_' + s_path1[-1][10:14]) &\n (self.df_ida['Visit'] == 'Month ' + str(int(ses_num.split('M')[-1]))) &\n (self.df_ida['Description'] == s_path0[-3])]\n\n if df_line_ida.empty:\n df_line_ida = self.df_ida[\n (self.df_ida['Subject ID'] == s_path1[-1][8] + '_S_' + s_path1[-1][10:14]) &\n (self.df_ida['Visit'] == 'Month ' + str(int(ses_num.split('M')[-1]))) &\n (self.df_ida['Description'] == s_path0[-3].replace('_', ' '))]\n\n # TR_BRAIN_3D_PIB_IR_CTAC -> TR:BRAIN 3D:PIB:IR CTAC\n if df_line_ida.empty:\n des = s_path0[-3].split('_')\n if len(des) == 6:\n des = des[0] + ':' + des[1] + ' ' + 
des[2] + ':' + des[3] + ':' + des[4] + ' ' + des[5]\n                        else:\n                            des = des[0] + ':' + des[1] + ' ' + des[2] + ':' + des[3] + ':' + des[4] + ' ' + des[\n                                5] + ' ' + des[6]\n                        df_line_ida = self.df_ida[\n                            (self.df_ida['Subject ID'] == s_path1[-1][8] + '_S_' + s_path1[-1][10:14]) &\n                            (self.df_ida['Visit'] == 'Month ' + str(int(ses_num.split('M')[-1]))) &\n                            (self.df_ida['Description'] == des)]\n\n                    df_line_ida.insert(0, 'filename', filename)\n                    df_line_ida = extend_line(df_line_ida, template)\n\n                    df_ses = df_ses.append(df_line_ida, ignore_index=True)\n\n                columns = list(df_ses)\n                columns[columns.index('Field Strength')] = 'mri_field'\n                df_ses.columns = columns\n\n                self.write(df_ses, os.path.join(root, sub_id, ses_num), sub_id + '_' + ses_num + '_scans')\n","repo_name":"adamwild/clinica","sub_path":"clinica/iotools/converters/nifd_to_bids/utils/parse_clinical.py","file_name":"parse_clinical.py","file_ext":"py","file_size_in_byte":15305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"}
+{"seq_id":"21574015765","text":"__author__ = \"vcancy\"\n\n\n# /usr/bin/python\n# -*-coding:utf-8-*-\n\n\"\"\"\n\nApproach (DFS): first find a target cell, then fan out from it in the four directions (up, down, left and right).\n\nWhat happens when we reach a new cell? Continue the DFS, which is simply recursion.\n\nRecursion boundary conditions: the indices stay inside the grid, the value at the cell is 1, and the cell has not been visited before.\n\nA set keeps the cells that have already been visited, so they are skipped the next time they are encountered.\n\n\"\"\"\n\nclass Solution:\n    def maxAreaOfIsland(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n        seen = set()  # keeps the cells that have already been explored\n        mx = 0\n\n        def dfs(grid, i, j):\n            '''\n            Recursively search the four neighbouring directions\n            '''\n            area = 0\n            if 0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == 1 and (i, j) not in seen:# boundary conditions\n                seen.add((i, j))\n                area = 1 + dfs(grid, i - 1, j) + dfs(grid, i + 1, j) + dfs(grid, i, j - 1) + dfs(grid, i, j + 1)\n            return area\n\n        for i in range(len(grid)):\n            for j in range(len(grid[0])):\n                if grid[i][j] == 1 and (i, j) not in seen:\n                    mx = max(mx, dfs(grid, i, j))\n\n        return mx\n","repo_name":"vcancy/python-algorithm","sub_path":"leetcode/Algorithms/Array/695.py","file_name":"695.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"29234011427","text":"from datetime import datetime, timedelta\n\nimport pytest\nfrom freezegun import freeze_time\nfrom smb.common.testing_utils import Any, dt\n\nfrom maps_adv.geosmb.doorman.server.lib.enums import (\n    CallEvent,\n    ClientGender,\n    OrderEvent,\n    SegmentType,\n    Source,\n)\n\npytestmark = [pytest.mark.asyncio, pytest.mark.real_db]\n\n\nmerge_kwargs = dict(\n    biz_id=123,\n    source=Source.BOOKING_YANG,\n    metadata={\"test_2\": 2},\n    phone=9000000,\n    email=\"email_updated@yandex.ru\",\n    passport_uid=888,\n    first_name=\"client_first_name_updated\",\n    last_name=\"client_last_name_updated\",\n    gender=ClientGender.FEMALE,\n    comment=\"this is updated comment\",\n    initiator_id=778899,\n)\n\n\nempty_merge_kwargs = dict(\n    biz_id=123,\n    source=Source.BOOKING_YANG,\n    metadata=None,\n    phone=None,\n    email=None,\n    passport_uid=None,\n    first_name=None,\n    last_name=None,\n    gender=None,\n    comment=None,\n    initiator_id=None,\n)\n\n\nasync def test_updates_client_details(factory, dm):\n    client_id = await factory.create_client()\n\n    await dm.merge_client(client_id=client_id, **merge_kwargs)\n\n    client_details = await factory.retrieve_client(client_id=client_id)\n    assert client_details == dict(\n        biz_id=123,\n        phone=\"9000000\",\n        email=\"email_updated@yandex.ru\",\n        passport_uid=888,\n        first_name=\"client_first_name_updated\",\n        last_name=\"client_last_name_updated\",\n        
gender=ClientGender.FEMALE,\n comment=\"this is updated comment\",\n labels=[\"mark-2021\"],\n cleared_for_gdpr=False,\n )\n\n\nasync def test_creates_merge_revision(factory, con, dm):\n client_id = await factory.create_client()\n\n await dm.merge_client(client_id=client_id, **merge_kwargs)\n\n revisions = await factory.retrieve_client_revisions(client_id)\n assert len(revisions) == 2\n assert revisions[0] == dict(\n biz_id=123,\n source=\"BOOKING_YANG\",\n metadata={\"test_2\": 2},\n phone=\"9000000\",\n email=\"email_updated@yandex.ru\",\n passport_uid=888,\n first_name=\"client_first_name_updated\",\n last_name=\"client_last_name_updated\",\n gender=ClientGender.FEMALE,\n comment=\"this is updated comment\",\n initiator_id=778899,\n )\n\n\nasync def test_returns_merged_client_details(factory, dm):\n client_id = await factory.create_client()\n\n got = await dm.merge_client(client_id=client_id, **merge_kwargs)\n\n assert got == dict(\n id=client_id,\n biz_id=123,\n phone=9000000,\n email=\"email_updated@yandex.ru\",\n passport_uid=888,\n first_name=\"client_first_name_updated\",\n last_name=\"client_last_name_updated\",\n gender=ClientGender.FEMALE,\n comment=\"this is updated comment\",\n source=Source.CRM_INTERFACE,\n registration_timestamp=Any(datetime),\n segments=[SegmentType.NO_ORDERS],\n labels=[\"mark-2021\"],\n statistics={\n \"orders\": {\n \"total\": 0,\n \"successful\": 0,\n \"unsuccessful\": 0,\n \"last_order_timestamp\": None,\n }\n },\n )\n\n\n@freeze_time(\"2020-01-01 00:00:01\", tick=True)\nasync def test_returns_merged_client_segments(dm, factory):\n client_id = await factory.create_client()\n await factory.create_order_event(\n client_id,\n event_type=OrderEvent.CREATED,\n event_timestamp=dt(\"2018-11-03 00:00:01\"),\n )\n for _ in range(3):\n await factory.create_resolved_order_events_pair(\n client_id, OrderEvent.ACCEPTED, event_timestamp=dt(\"2019-11-03 00:00:01\")\n )\n\n got = await dm.merge_client(client_id=client_id, **merge_kwargs)\n\n assert got[\"segments\"] == [\n SegmentType.REGULAR,\n SegmentType.ACTIVE,\n SegmentType.UNPROCESSED_ORDERS,\n ]\n\n\nasync def test_calculates_merged_client_order_statistics(dm, factory):\n client_id = await factory.create_client()\n event_latest_ts = dt(\"2020-03-03 00:00:00\")\n for i in range(4):\n await factory.create_order_event(\n client_id,\n event_type=OrderEvent.CREATED,\n event_timestamp=event_latest_ts - timedelta(days=i),\n )\n for _ in range(2):\n await factory.create_order_event(client_id, event_type=OrderEvent.ACCEPTED)\n await factory.create_order_event(client_id, event_type=OrderEvent.REJECTED)\n await factory.create_call_event(client_id, event_type=CallEvent.INITIATED)\n\n got = await dm.merge_client(client_id=client_id, **merge_kwargs)\n\n assert got[\"statistics\"] == {\n \"orders\": {\n \"total\": 4,\n \"successful\": 2,\n \"unsuccessful\": 1,\n \"last_order_timestamp\": event_latest_ts,\n }\n }\n\n\nasync def test_not_replaces_client_details_with_none(factory, dm):\n client_id = await factory.create_client()\n\n await dm.merge_client(client_id=client_id, **empty_merge_kwargs)\n\n client_details = await factory.retrieve_client(client_id=client_id)\n assert client_details == dict(\n biz_id=123,\n phone=\"1234567890123\",\n email=\"email@yandex.ru\",\n passport_uid=456,\n first_name=\"client_first_name\",\n last_name=\"client_last_name\",\n gender=ClientGender.MALE,\n comment=\"this is comment\",\n labels=[\"mark-2021\"],\n cleared_for_gdpr=False,\n )\n\n\nasync def test_creates_revision_with_skipped_fields(dm, 
factory, con):\n    client_id = await factory.create_client()\n\n    await dm.merge_client(client_id=client_id, **empty_merge_kwargs)\n\n    last_revision_details = await factory.retrieve_last_revision(client_id=client_id)\n    assert await con.fetchval(\"SELECT COUNT(*) FROM client_revisions\") == 2\n    assert last_revision_details == dict(\n        biz_id=123,\n        source=\"BOOKING_YANG\",\n        metadata=None,\n        phone=None,\n        email=None,\n        passport_uid=None,\n        first_name=None,\n        last_name=None,\n        gender=None,\n        comment=None,\n        initiator_id=None,\n    )\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/data_managers/test_merge_client.py","file_name":"test_merge_client.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"23073537513","text":"class Orders:\n    def __init__(self, name, order, payment, deliveryAddress, status):\n        self.order=order\n        self.name=name\n        self.payment=payment\n        self.deliveryAddress=deliveryAddress\n        self.status=status\n    # Create a dictionary of the order attributes and return it\n    def add_order(self):\n        orders_dict={\n            \"name\":self.name,\n            \"order\":self.order,\n            \"payment\":self.payment,\n            \"deliveryAddress\":self.deliveryAddress,\n            \"status\":self.status\n        }\n        return orders_dict\n\n# Prompts the user to input their details\nname=input(\"Input your name\")\norder=input(\"Input the type of order\")\npayment=input(\"Input the type of payment\")\ndeliveryAddress=input(\"Input your delivery address\")\nstatus=input(\"Add status of your order\")\n# An instance of Orders is assigned to a variable called user\nnew_Customer=Orders(name,order,payment,deliveryAddress,status)\nuser=new_Customer\nprint(new_Customer.add_order())\n","repo_name":"MariaGKimani/Backend-Mboga-Mtaani-","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"13215290001","text":"#!/usr/bin/env python3\nfrom typing import Dict, List\n\"\"\"\nCalculator to solve the puzzle of https://adventofcode.com/2020/day/18\n\"\"\"\n    # 2 * 3 + (4 * 5) becomes 26.\n    # 5 + (8 * 3 + 9 + 3 * 4 * 3) becomes 437.\n    # 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) becomes 12240.\n    # ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 becomes 13632.\n\ndef nested_precedence(line: List[str]) -> Dict[int, List[str]]:\n    \"\"\"follows precedence ordering and returns the characters grouped by nesting level\"\"\"\n    nesting_lvl = 0\n    nested = {nesting_lvl: []} \n    for char in line:\n        print(f'{char}', end='')\n        if char == \"(\":\n            # print('shift right')\n            nesting_lvl += 1\n            nested[nesting_lvl] = [char]\n            continue\n\n        nested[nesting_lvl].append(char)\n        if char == \")\":\n            # print('shift left')\n            nesting_lvl -= 1\n\n    return nested\n\n\n\nwith open('/Users/weber/Downloads/input-puzzle-aoc-18', encoding = 'utf-8') as file:\n    line = file.readline()\n    print(f'line: {line}')\n    print(nested_precedence(line.replace(' ', '')))","repo_name":"los-floppos/days-of-code","sub_path":"day_001-aoc-18/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"16321339017","text":"# License: GNU Affero General Public License v3 or later\n# A copy of GNU AGPL v3 should have been included in this software package in LICENSE.txt.\n\n\"\"\" A collection of classes for representing a variety of feature types \"\"\"\n\nfrom collections import OrderedDict\nimport 
logging\nimport os\nimport warnings\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Union\n\nfrom helperlibs.bio import seqio\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .qualifiers import NRPSPKSQualifier, SecMetQualifier, GeneFunction, GeneFunctionAnnotations\n\n\nclass Feature:\n \"\"\" The base class of any feature. Contains only a location, the label of the\n subclass, the 'notes' qualifier, and other qualifiers not tracked by any\n subclass.\n \"\"\"\n __slots__ = [\"location\", \"notes\", \"type\", \"_qualifiers\", \"created_by_antismash\"]\n\n def __init__(self, location: FeatureLocation, feature_type: str,\n created_by_antismash: bool = False) -> None:\n assert isinstance(location, (FeatureLocation, CompoundLocation)), type(location)\n self.location = location\n self.notes = [] # type: List[str]\n assert feature_type\n self.type = str(feature_type)\n self._qualifiers = OrderedDict() # type: Dict[str, List[str]]\n self.created_by_antismash = bool(created_by_antismash)\n\n @property\n def strand(self) -> int:\n \"\"\" A simple wrapper to access the location strand directly.\n \"\"\"\n return self.location.strand\n\n def extract(self, sequence: Seq) -> Seq:\n \"\"\" Extracts the section of the given sequence that this feature covers.\n\n Return type is always a Seq, unlike location.extract.\n \"\"\"\n assert isinstance(sequence, Seq)\n return self.location.extract(sequence)\n\n def get_qualifier(self, key: str) -> Optional[Tuple]:\n \"\"\" Fetches a qualifier by key and returns a tuple of items stored under\n that key or None if the key was not present.\n \"\"\"\n qualifier = self._qualifiers.get(key)\n if qualifier:\n return tuple(qualifier)\n return None\n\n def overlaps_with(self, other: Union[\"Feature\", FeatureLocation]) -> bool:\n \"\"\" Returns True if the given feature overlaps with this feature.\n This operation is commutative, a.overlaps_with(b) is equivalent to\n b.overlaps_with(a).\n \"\"\"\n if isinstance(other, Feature):\n location = other.location\n elif isinstance(other, FeatureLocation):\n location = other\n else:\n raise TypeError(\"Container must be a Feature or a FeatureLocation, not %s\" % type(other))\n return (self.location.start in location\n or self.location.end - 1 in location\n or location.start in self.location\n or location.end - 1 in self.location)\n\n def is_contained_by(self, other: Union[\"Feature\", FeatureLocation]) -> bool:\n \"\"\" Returns True if the given feature is wholly contained by this\n feature.\n \"\"\"\n end = self.location.end - 1 # to account for the non-inclusive end\n if isinstance(other, Feature):\n return self.location.start in other.location and end in other.location\n if isinstance(other, FeatureLocation):\n return self.location.start in other and end in other\n raise TypeError(\"Container must be a Feature or a FeatureLocation, not %s\" % type(other))\n\n def to_biopython(self, qualifiers: Dict[str, Any] = None) -> List[SeqFeature]:\n \"\"\" Converts this feature into one or more SeqFeature instances.\n\n Subclasses must manage their own attributes and potential extra\n features.\n \"\"\"\n feature = SeqFeature(self.location, type=self.type)\n quals = self._qualifiers.copy()\n notes = self._qualifiers.get(\"note\", []) + self.notes\n if qualifiers:\n notes += qualifiers.pop(\"note\", [])\n quals.update(qualifiers)\n if notes:\n # sorting helps with consistency and comparison testing\n quals[\"note\"] = 
sorted(notes)\n if self.created_by_antismash:\n quals[\"tool\"] = [\"antismash\"]\n # sorted here to match the behaviour of biopython\n for key, val in sorted(quals.items()):\n feature.qualifiers[key] = val\n assert isinstance(feature.qualifiers, dict)\n return [feature]\n\n def __lt__(self, other: \"Feature\") -> bool:\n \"\"\" Allows sorting Features by location without key complication \"\"\"\n assert isinstance(other, Feature)\n if self.location.start < other.location.start:\n return True\n elif self.location.start == other.location.start:\n return self.location.end < other.location.end\n return False\n\n def __str__(self) -> str:\n return repr(self)\n\n def __repr__(self) -> str:\n return \"%s(%s)\" % (self.type, self.location)\n\n @staticmethod\n def from_biopython(bio_feature: SeqFeature, feature: \"Feature\" = None,\n leftovers: Dict[str, Any] = None) -> SeqFeature:\n \"\"\" Converts a SeqFeature into a single Feature instance.\n\n Arguments:\n bio_feature: the SeqFeature to convert\n feature: a optional Feature instance to update with the values\n this class tracks\n leftovers: any qualifiers remaining from the original SeqFeature\n that have not been used by any subclass\n\n Returns:\n a Feature instance\n \"\"\"\n if feature is None:\n feature = Feature(bio_feature.location, bio_feature.type)\n if not leftovers:\n assert isinstance(bio_feature.qualifiers, dict)\n leftovers = bio_feature.qualifiers.copy()\n feature.notes = leftovers.pop(\"note\", [])\n if leftovers:\n feature._qualifiers.update(leftovers)\n return feature\n\n @staticmethod\n def make_qualifiers_copy(bio_feature: SeqFeature) -> Dict[str, Any]:\n \"\"\" Makes a shallow copy of a SeqFeature's qualifiers. Only the 'notes'\n key will have a copy taken at a deeper level.\n \"\"\"\n qualifiers = bio_feature.qualifiers.copy()\n if \"note\" in qualifiers:\n qualifiers[\"note\"] = qualifiers[\"note\"].copy()\n return qualifiers\n\n\nclass Gene(Feature):\n \"\"\" A feature representing a Gene (more general than a CDS) \"\"\"\n __slots__ = [\"_pseudo\", \"locus_tag\", \"gene_name\"]\n\n def __init__(self, location, locus_tag=None, gene_name=None, pseudo_gene=False,\n created_by_antismash=False, qualifiers=None):\n super().__init__(location, feature_type=\"gene\",\n created_by_antismash=created_by_antismash)\n self.locus_tag = str(locus_tag) if locus_tag else None\n self.gene_name = str(gene_name) if gene_name else None\n if not self.locus_tag and not self.gene_name:\n raise ValueError(\"Gene instances must have a locus tag or name\")\n self._pseudo = bool(pseudo_gene)\n if self._pseudo:\n assert not created_by_antismash, \"pseudo genes can only come from input files\"\n if qualifiers:\n assert isinstance(qualifiers, dict)\n self._qualifiers.update(qualifiers)\n\n def get_name(self) -> str:\n \"\"\" Returns the locus tag or gene name of the gene, in that order \"\"\"\n return self.locus_tag or self.gene_name\n\n def is_pseudo_gene(self) -> bool:\n \"\"\" Was the gene marked as a pseudo-gene \"\"\"\n return self._pseudo\n\n def to_biopython(self, qualifiers: Dict[str, Any] = None) -> SeqFeature:\n \"\"\" Construct a matching SeqFeature for this Gene \"\"\"\n if not qualifiers:\n qualifiers = {}\n if self.locus_tag:\n qualifiers[\"locus_tag\"] = [self.locus_tag]\n if self.gene_name:\n qualifiers[\"gene\"] = [self.gene_name]\n if self._pseudo:\n qualifiers[\"pseudo\"] = []\n return super().to_biopython(qualifiers)\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None) -> \"Gene\":\n if leftovers is 
None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n # grab mandatory qualifiers and create the class\n locus = leftovers.pop(\"locus_tag\", [None])[0]\n name = leftovers.pop(\"gene\", [None])[0]\n pseudo = \"pseudo\" in leftovers\n if pseudo:\n leftovers.pop(\"pseudo\")\n try:\n feature = Gene(bio_feature.location, locus_tag=locus, gene_name=name, pseudo_gene=pseudo)\n except AssertionError:\n print(locus, name, bio_feature.qualifiers)\n raise\n super(Gene, feature).from_biopython(bio_feature, feature=feature, leftovers=leftovers)\n return feature\n\n\nclass ClusterBorder(Feature):\n \"\"\" A feature representing a cluster border \"\"\"\n __slots__ = [\"tool\", \"probability\", \"cutoff\", \"extent\", \"product\", \"rule\",\n \"contig_edge\", \"high_priority_product\"]\n\n def __init__(self, location: FeatureLocation, tool: str, probability: float = None,\n cutoff: int = 0, extent: int = 0,\n product: Optional[str] = None, rule: Optional[str] = None,\n contig_edge: bool = False, high_priority_product: bool = True) -> None:\n super().__init__(location, feature_type=\"cluster_border\",\n created_by_antismash=True)\n # required\n self.tool = str(tool)\n # args with simple defaults\n self.high_priority_product = bool(high_priority_product)\n self.contig_edge = bool(contig_edge)\n self.cutoff = int(cutoff)\n self.extent = int(extent)\n\n # more complicated args\n if product is not None:\n assert isinstance(product, str), type(product)\n self.product = product\n\n # specific to cluster finder\n self.probability = None\n if probability is not None:\n self.probability = float(probability)\n\n # specific to rule-based\n if rule is not None:\n assert isinstance(rule, str), type(rule)\n self.rule = rule\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n mine[\"aStool\"] = [self.tool]\n mine[\"contig_edge\"] = [self.contig_edge]\n if self.probability is not None:\n mine[\"probability\"] = [str(self.probability)]\n if self.product:\n mine[\"product\"] = [self.product]\n if self.cutoff:\n mine[\"cutoff\"] = [self.cutoff]\n if self.extent:\n mine[\"extent\"] = [self.extent]\n if self.rule:\n mine[\"rule\"] = [self.rule]\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None):\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n\n # grab mandatory qualifiers and create the class\n tool = leftovers.pop(\"aStool\")[0]\n\n # optional\n probability = leftovers.pop(\"probability\", [None])[0]\n cutoff = leftovers.pop(\"cutoff\", [0])[0]\n extent = leftovers.pop(\"extent\", [0])[0]\n rule = leftovers.pop(\"rule\", [None])[0]\n product = leftovers.pop(\"product\", [None])[0]\n contig_edge = leftovers.pop(\"contig_edge\", [\"\"])[0] == \"True\"\n\n feature = ClusterBorder(bio_feature.location, tool, probability=probability,\n cutoff=cutoff, extent=extent, rule=rule, product=product,\n contig_edge=contig_edge)\n\n # grab parent optional qualifiers\n super(ClusterBorder, feature).from_biopython(bio_feature, feature=feature, leftovers=leftovers)\n\n return feature\n\n def __str__(self) -> str:\n return repr(self)\n\n def __repr__(self) -> str:\n return \"ClusterBorder(%s, %s)\" % (self.product, self.location)\n\n\nclass AntismashFeature(Feature):\n \"\"\" A base class for all sub-CDS Antismash features \"\"\"\n __slots__ = [\"domain_id\", \"database\", \"detection\", \"_evalue\", \"label\",\n \"locus_tag\", \"_score\", \"_translation\"]\n\n def 
__init__(self, location, feature_type):\n super().__init__(location, feature_type, created_by_antismash=True)\n self.domain_id = None\n self.database = None\n self.detection = None\n self._evalue = None # float\n self.label = None\n self.locus_tag = None\n self._score = None # float\n\n self._translation = None\n\n @property\n def translation(self) -> str:\n \"\"\" The amino acid translation of the feature. \"\"\"\n return self._translation\n\n @translation.setter\n def translation(self, translation: str):\n self._translation = str(translation)\n\n @property\n def score(self):\n \"\"\" The bitscore reported by a tool when locating the feature \"\"\"\n return self._score\n\n @score.setter\n def score(self, score):\n self._score = float(score)\n\n @property\n def evalue(self):\n \"\"\" The e-value reported by a tool when locating the feature \"\"\"\n return self._evalue\n\n @evalue.setter\n def evalue(self, evalue):\n self._evalue = float(evalue)\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n if self.label:\n mine[\"label\"] = [self.label]\n if self.score is not None:\n mine[\"score\"] = [str(self.score)]\n if self.evalue is not None:\n mine[\"evalue\"] = [str(\"{:.2E}\".format(self.evalue))]\n if self.locus_tag:\n mine[\"locus_tag\"] = [self.locus_tag]\n if self._translation:\n mine[\"translation\"] = [self._translation]\n if self.database:\n mine[\"database\"] = [self.database]\n if self.detection:\n mine[\"detection\"] = [self.detection]\n if self.domain_id:\n mine[\"domain_id\"] = [self.domain_id]\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n\nclass Domain(AntismashFeature):\n \"\"\" A base class for features which represent a domain type \"\"\"\n __slots__ = [\"tool\", \"domain\"]\n\n def __init__(self, location, feature_type):\n super().__init__(location, feature_type)\n self.tool = None\n self.domain = None\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n if self.tool:\n mine[\"aSTool\"] = [self.tool]\n if self.domain:\n mine[\"aSDomain\"] = [self.domain]\n if self.domain_id:\n mine[\"aSDomain_id\"] = [self.domain_id]\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n\nclass CDSMotif(Domain):\n \"\"\" A base class for features that represent a motif within a CDSFeature \"\"\"\n __slots__ = [\"motif\"]\n\n def __init__(self, location):\n super().__init__(location, feature_type=\"CDS_motif\")\n self.motif = None\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None):\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n if not feature:\n feature = CDSMotif(bio_feature.location)\n\n feature.motif = leftovers.pop(\"description\", [None])[0]\n return super(CDSMotif, feature).from_biopython(bio_feature, feature, leftovers)\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n if self.motif:\n mine[\"motif\"] = [self.motif]\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n\nclass PFAMDomain(Domain):\n \"\"\" A feature representing a PFAM domain within a CDS.\n \"\"\"\n __slots__ = [\"description\", \"db_xref\", \"probability\"]\n\n def __init__(self, location: FeatureLocation, description: str) -> None:\n super().__init__(location, feature_type=\"PFAM_domain\")\n assert isinstance(description, str)\n self.description = description\n self.probability = None\n self.db_xref = [] # type: List[str]\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n 
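# PFAM-specific qualifiers only; scores, notes and domain ids are contributed by the parent classes\n        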
mine[\"description\"] = self.description\n if self.probability is not None:\n mine[\"probability\"] = [self.probability]\n if self.db_xref:\n mine[\"db_xref\"] = self.db_xref\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None):\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n # grab mandatory qualifiers and create the class\n description = leftovers.pop(\"description\")[0]\n feature = PFAMDomain(bio_feature.location, description)\n\n # grab optional qualifiers\n feature.db_xref = leftovers.pop(\"db_xref\", [])\n\n # grab parent optional qualifiers\n super(PFAMDomain, feature).from_biopython(bio_feature, feature=feature, leftovers=leftovers)\n\n return feature\n\n\nclass AntismashDomain(Domain):\n \"\"\" A class to represent a Domain with extra specificities and type information \"\"\"\n __slots__ = [\"domain_subtype\", \"specificity\"]\n\n def __init__(self, location):\n super().__init__(location, feature_type=\"aSDomain\")\n self.domain_subtype = None\n self.specificity = []\n\n def to_biopython(self, qualifiers=None) -> List[SeqFeature]:\n mine = OrderedDict() # type: Dict[str, List[str]]\n if self.domain_subtype:\n mine[\"domain_subtype\"] = [self.domain_subtype]\n if self.specificity:\n mine[\"specificity\"] = self.specificity\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None) -> \"AntismashDomain\":\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n # grab mandatory qualifiers and create the class\n feature = AntismashDomain(bio_feature.location)\n\n # grab optional qualifiers\n feature.domain_subtype = leftovers.pop(\"domain_subtype\", [None])[0]\n feature.specificity = list(leftovers.pop(\"specificity\", []))\n\n # grab parent optional qualifiers\n super(AntismashDomain, feature).from_biopython(bio_feature, feature=feature, leftovers=leftovers)\n\n return feature\n\n\nclass CDSFeature(Feature):\n \"\"\" A feature representing a single CDS/gene. 
\"\"\"\n __slots__ = [\"_translation\", \"protein_id\", \"locus_tag\", \"gene\", \"product\",\n \"transl_table\", \"_sec_met\", \"product_prediction\", \"cluster\", \"_gene_functions\",\n \"unique_id\", \"_nrps_pks\", \"motifs\"]\n\n def __init__(self, location, translation=None, locus_tag=None, protein_id=None,\n product=None, gene=None):\n super().__init__(location, feature_type=\"CDS\")\n # mandatory\n # codon_start\n # db_xref\n self._gene_functions = GeneFunctionAnnotations()\n\n # semi-optional\n self.protein_id = _sanitise_id_value(protein_id)\n self.locus_tag = _sanitise_id_value(locus_tag)\n self.gene = _sanitise_id_value(gene)\n self._translation = None\n if translation is not None:\n self.translation = translation\n\n # optional\n self.product = product\n self.transl_table = None\n self._sec_met = None # SecMetQualifier()\n self._nrps_pks = NRPSPKSQualifier()\n self.product_prediction = [] # TODO: shift into nrps sub section?\n\n self.motifs = []\n\n if not (protein_id or locus_tag or gene):\n raise ValueError(\"CDSFeature requires at least one of: gene, protein_id, locus_tag\")\n\n # runtime-only data\n self.cluster = None\n self.unique_id = None # set only when added to a record\n\n @property\n def gene_functions(self) -> GeneFunctionAnnotations:\n \"\"\" All gene function annotations for the CDS \"\"\"\n return self._gene_functions\n\n @property\n def gene_function(self) -> GeneFunction:\n \"\"\" The likely gene function of the CDS, as determined by all annotated\n gene functions.\n \"\"\"\n return self._gene_functions.get_classification()\n\n @property\n def sec_met(self) -> SecMetQualifier:\n \"\"\" The qualifier containing secondary metabolite information for the\n CDSFeature.\n \"\"\"\n return self._sec_met\n\n @sec_met.setter\n def sec_met(self, sec_met):\n if sec_met is not None and not isinstance(sec_met, SecMetQualifier):\n raise TypeError(\"CDSFeature.sec_met can only be set to an instance of SecMetQualifier\")\n self._sec_met = sec_met\n\n @property\n def nrps_pks(self):\n \"\"\" The NRPSPKSQualifier of the feature \"\"\"\n return self._nrps_pks\n\n @nrps_pks.setter\n def nrps_pks(self, qualifier):\n if qualifier is not None and not isinstance(qualifier, NRPSPKSQualifier):\n raise TypeError(\"CDSFeature.nrps_pks can only be set to an instance of NRPSPKSQualifier\")\n self._nrps_pks = qualifier\n\n @property\n def translation(self) -> str:\n \"\"\" The translation of the CDS, as a string of amino acids \"\"\"\n return self._translation\n\n @translation.setter\n def translation(self, translation: str) -> None:\n assert \"-\" not in translation, \"%s contains - in translation\" % self.get_name()\n self._translation = str(translation)\n\n def get_accession(self) -> str:\n \"Get the gene ID from protein id, gene name or locus_tag, in that order\"\n for val in [self.protein_id, self.gene, self.locus_tag]:\n if val:\n return val\n raise ValueError(\"%s altered to contain no identifiers\" % self)\n\n def get_name(self) -> str:\n \"Get the gene ID from locus_tag, gene name or protein id, in that order\"\n for val in [self.locus_tag, self.gene, self.protein_id]:\n if val:\n return val\n raise ValueError(\"%s altered to contain no identifiers\" % self)\n\n @staticmethod\n def from_biopython(bio_feature: SeqFeature, feature: Feature = None,\n leftovers: Optional[Dict] = None) -> \"CDSFeature\":\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n # grab mandatory qualifiers and create the class\n\n # semi-optional qualifiers\n protein_id = 
leftovers.pop(\"protein_id\", [None])[0]\n locus_tag = leftovers.pop(\"locus_tag\", [None])[0]\n gene = leftovers.pop(\"gene\", [None])[0]\n if not (gene or protein_id or locus_tag):\n if \"pseudo\" in leftovers or \"pseudogene\" in leftovers:\n locus_tag = \"pseudo_%d\" % int(bio_feature.location.start + 1) # 1-indexed\n else:\n # TODO solve somehow?\n logging.critical(\"CDS feature created from biopython without identifier: %s\", bio_feature)\n raise ValueError(\"CDSFeature requires at least one of: gene, protein_id, locus_tag\")\n translation = leftovers.pop(\"translation\", [None])[0]\n if translation and \"-\" in translation:\n logging.warning(\"Translation for CDS %s (at %s) has a gap. Discarding and regenerating.\",\n locus_tag or protein_id or gene, bio_feature.location)\n translation = None\n\n feature = CDSFeature(bio_feature.location, translation, gene=gene,\n locus_tag=locus_tag, protein_id=protein_id)\n\n # grab optional qualifiers\n feature.product = leftovers.pop(\"product\", [None])[0]\n feature.transl_table = leftovers.pop(\"transl_table\", [None])[0]\n sec_met = leftovers.pop(\"sec_met\", None)\n if sec_met:\n feature.sec_met = SecMetQualifier.from_biopython(sec_met)\n gene_functions = leftovers.pop(\"gene_functions\", [])\n if gene_functions:\n feature.gene_functions.add_from_qualifier(gene_functions)\n feature.product_prediction = leftovers.pop(\"aSProdPred\", [])\n\n # grab parent optional qualifiers\n super(CDSFeature, feature).from_biopython(bio_feature, feature=feature, leftovers=leftovers)\n\n return feature\n\n def to_biopython(self, qualifiers: Dict[str, List[str]] = None) -> SeqFeature:\n mine = OrderedDict() # type: Dict[str, List[str]]\n # mandatory\n mine[\"translation\"] = [self.translation]\n if self.product_prediction:\n mine[\"aSProdPred\"] = [self.product_prediction]\n # optional\n for attr in [\"gene\", \"transl_table\", \"locus_tag\",\n \"protein_id\", \"product\"]:\n val = getattr(self, attr)\n if val:\n mine[attr] = [str(val)]\n if self._gene_functions:\n mine[\"gene_functions\"] = list(map(str, self._gene_functions))\n # since it's already a list\n if self.sec_met:\n mine[\"sec_met\"] = self.sec_met\n # respect qualifiers given to us\n if qualifiers:\n mine.update(qualifiers)\n return super().to_biopython(mine)\n\n def __repr__(self) -> str:\n return str(self)\n\n def __str__(self) -> str:\n return \"CDS(%s, %s)\" % (self.get_name(), self.location)\n\n\nclass Prepeptide(CDSMotif):\n \"\"\" A class representing a prepeptide. Used for tracking a multi-feature\n construction with a leader, core and tail. To allow for multiple types\n of prepeptide (e.g. lanthi- or sacti-peptides), only the core must exist.\n \"\"\"\n def __init__(self, location, peptide_class, core, locus_tag, peptide_subclass=None,\n score=0., monoisotopic_mass=0., molecular_weight=0.,\n alternative_weights=None, leader=\"\", tail=\"\", **kwargs):\n \"\"\"\n Arguments:\n peptide_class: the kind of prepeptide, e.g. 'lanthipeptide', 'thiopeptide'\n core: the sequence of the core\n locus_tag: the locus tag to use for the feature\n prepeptide_subclass: the subclass of the prepeptide, e.g. 'Type II'\n leader: the sequence of the leader, if it exists\n tail: the sequence of the tail, if it exists\n ... 
other args that will be passed through to CDSFeature\n        \"\"\"\n        for arg in [peptide_class, core, leader, tail]:\n            assert isinstance(arg, str), type(arg)\n        self._leader = leader\n        self._core = core\n        self._tail = tail\n        super().__init__(location, **kwargs)\n        self.locus_tag = locus_tag\n        self.type = \"CDS_motif\"\n        self.peptide_class = peptide_class\n        if peptide_subclass:\n            peptide_subclass = peptide_subclass.replace(\"-\", \" \")  # \"Type-II\" > \"Type II\"\n        self.peptide_subclass = peptide_subclass\n        self.score = float(score)\n        self.monoisotopic_mass = float(monoisotopic_mass)\n        self.molecular_weight = float(molecular_weight)\n        self.alternative_weights = []\n        if alternative_weights:\n            self.alternative_weights = [float(weight) for weight in alternative_weights]\n\n    @property\n    def translation(self) -> str:\n        return self._leader + self._core + self._tail\n\n    @translation.setter\n    def translation(self, _translation) -> None:\n        # the setter must accept a value to be a valid property setter,\n        # but assignment is deliberately rejected\n        raise AttributeError(\"Cannot assign to translation in a Prepeptide\")\n\n    @property\n    def leader(self) -> str:\n        \"\"\" The leader sequence of the prepeptide \"\"\"\n        return self._leader\n\n    @leader.setter\n    def leader(self, leader: str) -> None:\n        assert isinstance(leader, str)\n        self._leader = leader\n\n    @property\n    def core(self) -> str:\n        \"\"\" The core sequence of the prepeptide \"\"\"\n        return self._core\n\n    @core.setter\n    def core(self, core: str) -> None:\n        assert isinstance(core, str)\n        self._core = core\n\n    @property\n    def tail(self) -> str:\n        \"\"\" The tail sequence of the prepeptide \"\"\"\n        return self._tail\n\n    @tail.setter\n    def tail(self, tail: str) -> None:\n        assert isinstance(tail, str)\n        self._tail = tail\n\n    def get_name(self) -> str:\n        \"\"\" Returns the locus tag of the parent CDS.\n\n            Uses the same function name as the CDSFeature for consistency.\n        \"\"\"\n        return self.locus_tag\n\n    def to_biopython(self, qualifiers: Dict[str, List] = None) -> List[SeqFeature]:\n        \"\"\" Generates up to three SeqFeatures, depending if leader and tail exist.\n            Any qualifiers given will be used as a base for all SeqFeatures created.\n        \"\"\"\n        # calculate core location\n        core_start = self.location.start\n        core_end = self.location.end\n        if self.leader:\n            core_start += len(self.leader) * 3\n        if self.tail:\n            core_end -= len(self.tail) * 3\n        core_location = FeatureLocation(core_start, core_end, self.location.strand)\n\n        # add qualifiers\n        if not qualifiers:\n            qualifiers = {'note': []}\n        if 'note' not in qualifiers:\n            qualifiers['note'] = []\n\n        # build features\n        features = []\n        if self.leader:\n            start = self.location.start\n            leader_location = FeatureLocation(start, core_location.start, self.location.strand)\n            leader = SeqFeature(leader_location, type=\"CDS_motif\", qualifiers={\"note\": []})\n            leader.translation = self.leader\n            leader.qualifiers['locus_tag'] = [self.locus_tag]\n            leader.qualifiers['note'].extend(['leader peptide', self.peptide_class,\n                                             'predicted leader seq: %s' % self.leader])\n            features.append(leader)\n\n        core = SeqFeature(core_location, type=\"CDS_motif\", qualifiers=qualifiers)\n        core.qualifiers['locus_tag'] = [self.locus_tag]\n        core.qualifiers['note'].extend(['core peptide', self.peptide_class,\n                                        'predicted class: %s' % self.peptide_subclass,\n                                        \"predicted core seq: %s\" % self.core,\n                                        \"score: %0.2f\" % self.score,\n                                        \"molecular weight: %0.1f\" % self.molecular_weight,\n                                        \"monoisotopic mass: %0.1f\" % self.monoisotopic_mass])\n        if self.alternative_weights:\n            weights = map(lambda x: \"%0.1f\" % x, self.alternative_weights)\n            
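# map() is lazy, so the join() in the following line consumes it exactly once\n            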
core.qualifiers['note'].append('alternative weights: %s' % \"; \".join(weights))\n\n features.append(core)\n\n if self.tail:\n tail_location = FeatureLocation(core_location.end, self.location.end, self.location.strand)\n tail = SeqFeature(tail_location, type=\"CDS_motif\")\n tail.translation = self.tail\n tail.qualifiers['locus_tag'] = [self.locus_tag]\n tail.qualifiers['note'] = ['tail peptide', self.peptide_class]\n features.append(tail)\n\n return features\n\n def to_json(self) -> Dict:\n \"\"\" Converts the qualifier to a dictionary for storing in JSON results.\n \"\"\"\n data = dict(vars(self))\n for var in [\"_tail\", \"_core\", \"_leader\"]:\n data[var.replace(\"_\", \"\")] = data[var]\n del data[var]\n data[\"location\"] = str(self.location)\n data[\"score\"] = self.score\n return data\n\n\nclass Cluster(Feature):\n \"\"\" A feature representing a cluster. Tracks which CDS features belong to it\"\"\"\n __slots__ = [\"_extent\", \"_cutoff\", \"_products\", \"contig_edge\",\n \"detection_rules\", \"smiles_structure\",\n \"clusterblast\", \"knownclusterblast\", \"subclusterblast\",\n \"parent_record\", \"cds_children\", \"borders\", \"monomers_prediction\"]\n\n def __init__(self, location: FeatureLocation, cutoff: int, extent: int, products: List) -> None:\n super().__init__(location, feature_type=\"cluster\",\n created_by_antismash=True)\n\n self._extent = int(extent)\n self._cutoff = int(cutoff)\n self._products = []\n for product in products:\n self.add_product(product)\n\n self.contig_edge = None # hmm_detection borderpredict\n self.detection_rules = []\n self.smiles_structure = None # SMILES string\n self.monomers_prediction = None\n\n self.clusterblast = None\n self.knownclusterblast = None\n self.subclusterblast = None\n\n # for runtime management\n self.parent_record = None\n self.cds_children = OrderedDict()\n self.borders = []\n\n @property\n def products(self) -> Iterable[str]:\n \"\"\" The products of a cluster \"\"\"\n return tuple(self._products)\n\n def add_product(self, product: str) -> None:\n \"\"\" Add the given product to the cluster's list of products \"\"\"\n assert product and isinstance(product, str), str(product)\n self._products.append(product)\n\n def get_cluster_number(self):\n \"\"\" Returns the cluster number which the parent record uses to refer to\n this cluster. 
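\n\n            Raises a ValueError if the cluster has not been added to a record.\n        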
\"\"\"\n if not self.parent_record:\n raise ValueError(\"Cluster not contained in record\")\n return self.parent_record.get_cluster_number(self)\n\n def trim_overlapping(self):\n \"\"\" Shrinks the cluster, where possible, to exclude any features which\n overlap with the edges of the cluster.\n Any feature fully contained before shrinking will still be fully\n contained.\n \"\"\"\n if not self.parent_record:\n logging.warning(\"Trimming cluster which does not belong to a record\")\n return\n features = self.parent_record.get_cds_features_within_location(self.location,\n with_overlapping=True)\n # don't trim if there's no features to trim by\n if not features:\n return\n\n # find the deepest feature that only overlaps at the beginning\n previous = None\n index = 0\n current = features[index]\n # track where to trim to\n start = self.location.start\n while current.overlaps_with(self) and not current.is_contained_by(self):\n start = max([start, current.location.start, current.location.end])\n previous = current\n index += 1\n if index >= len(features):\n current = None\n break\n current = features[index]\n\n # don't cause a contained feature to now overlap only\n if previous and current:\n start = min([start, current.location.start, current.location.end])\n\n # find the deepest feature that only overlaps at the end\n # but skip any indices already covered in the lead search\n lead_index = index\n previous = None\n index = len(features) - 1\n current = features[index]\n # track where to trim to\n end = self.location.end\n while index > lead_index and current.overlaps_with(self) and not current.is_contained_by(self):\n end = min([end, current.location.start, current.location.end])\n previous = current\n index -= 1\n if index < 0:\n current = None\n break\n current = features[index]\n\n # but don't cause a contained feature to now overlap only\n if previous and current:\n end = max([end, current.location.start, current.location.end])\n\n # finally, do the trim itself\n new_loc = FeatureLocation(start, end, self.location.strand)\n if self.location.start != start or self.location.end != end:\n logging.debug(\"Cluster %d trimming location from %s to %s\",\n self.get_cluster_number(), self.location, new_loc)\n # make sure the size is never increased\n assert self.location.start <= start < end <= self.location.end\n self.location = new_loc\n\n for cds in self.cds_children:\n assert cds.is_contained_by(self), \"cluster trimming removed wholly contained CDS\"\n\n def add_cds(self, cds: CDSFeature):\n \"\"\" Adds a CDSFeature to the cluster \"\"\"\n assert isinstance(cds, CDSFeature)\n assert cds.is_contained_by(self), \"cds %s outside cluster %s\" % (cds, self)\n self.cds_children[cds] = None\n\n @property\n def cutoff(self):\n \"\"\" The maximal distance between genes when defining the cluster.\n The distance between core genes after definition will likely be\n smaller.\"\"\"\n return self._cutoff\n\n @cutoff.setter\n def cutoff(self, cutoff):\n if cutoff is not None:\n cutoff = int(cutoff)\n self._cutoff = cutoff\n\n @property\n def extent(self):\n \"\"\" The distance the cluster extends from the first and last genes which\n from which the cluster was defined.\n \"\"\"\n return self._extent\n\n @extent.setter\n def extent(self, extent):\n if extent is not None:\n self._extent = int(extent)\n\n def get_product_string(self):\n \"\"\" Returns the cluster's products as a single string \"\"\"\n assert None not in self._products, self._products\n return \"-\".join(self._products)\n\n @property\n def 
probability(self) -> Optional[float]:\n \"\"\" The cluster probability, if relevant. \"\"\"\n probabilities = {border.probability for border in self.borders}\n # one border ignores probabilities, then don't use a probability\n if None in probabilities:\n return None\n # if all agree on the probability\n if len(probabilities) == 1:\n return list(probabilities)[0]\n # if they disagree, return None again\n return None\n\n @staticmethod\n def from_biopython(bio_feature, feature=None, leftovers=None):\n if leftovers is None:\n leftovers = Feature.make_qualifiers_copy(bio_feature)\n # grab mandatory qualifiers and create the class\n cutoff = leftovers.pop(\"cutoff\")[0]\n extent = leftovers.pop(\"extension\")[0]\n products = leftovers.pop(\"product\")[0].split(\"-\")\n cluster = Cluster(bio_feature.location, cutoff, extent, products)\n # take the detection rules from \"note\"\n # first check it exists\n index = -1\n for i, note in enumerate(leftovers.get(\"note\", [])):\n if note.startswith(\"Detection rule\"):\n index = i\n break\n # then split it into the relevant pieces\n if index > -1:\n products = []\n rules = []\n text = leftovers[\"note\"].pop(index)\n text = text.split(\":\", 1)[1] # strip the leadin\n text = text.split(\";\") # separate rules\n for rule in text:\n if not rule:\n continue\n assert \": \" in rule, rule\n product, rule = rule.split(\": \")\n rule = rule[1:-1] # strip ( )\n products.append(product.strip())\n rules.append(rule)\n cluster.detection_rules = rules\n\n # grab optional qualifiers\n contig_edge = leftovers.pop(\"contig_edge\", [None])[0]\n if not contig_edge:\n cluster.contig_edge = None\n else:\n cluster.contig_edge = contig_edge == \"True\"\n cluster.smiles_structure = leftovers.pop(\"structure\", None)\n # grab optional parent qualifiers\n super(Cluster, cluster).from_biopython(bio_feature, cluster, leftovers)\n\n return cluster\n\n def to_biopython(self, qualifiers=None):\n mine = OrderedDict()\n mine[\"cutoff\"] = [str(self.cutoff)]\n mine[\"extension\"] = [str(self.extent)]\n if self.contig_edge is not None:\n mine[\"contig_edge\"] = [str(self.contig_edge)]\n assert isinstance(self._products, list), type(self.products)\n mine[\"product\"] = [self.get_product_string()]\n rule_text = [\"Detection rule(s) for this cluster type:\"]\n assert isinstance(self.detection_rules, list), type(self.detection_rules)\n for product, rule in zip(self.products, self.detection_rules):\n rule_text.append(\"%s: (%s);\" % (product, rule))\n rule_text = \" \".join(rule_text)\n if qualifiers:\n mine.update(qualifiers)\n if \"note\" not in mine:\n mine[\"note\"] = []\n mine[\"note\"].append(rule_text)\n return super().to_biopython(mine)\n\n def write_to_genbank(self, filename=None, directory=None, record=None):\n \"\"\" Writes a genbank file containing only the information contained\n within the Cluster.\n \"\"\"\n if not filename:\n filename = \"%s.cluster%03d.gbk\" % (self.parent_record.id, self.get_cluster_number())\n if directory:\n filename = os.path.join(directory, filename)\n\n if record is None:\n record = self.parent_record.to_biopython()\n assert isinstance(record, SeqRecord)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n cluster_record = record[self.location.start:self.location.end]\n\n cluster_record.annotations[\"date\"] = record.annotations.get(\"date\", '')\n cluster_record.annotations[\"source\"] = record.annotations.get(\"source\", '')\n cluster_record.annotations[\"organism\"] = record.annotations.get(\"organism\", '')\n 
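# note: slicing a Biopython SeqRecord drops most annotations, which is why the fields here are copied across from the parent record by hand\n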
cluster_record.annotations[\"taxonomy\"] = record.annotations.get(\"taxonomy\", [])\n cluster_record.annotations[\"data_file_division\"] = record.annotations.get(\"data_file_division\", 'UNK')\n # our cut-out clusters are always linear\n cluster_record.annotations[\"topology\"] = \"linear\"\n\n seqio.write([cluster_record], filename, 'genbank')\n\n\ndef _sanitise_id_value(name: Optional[str]) -> Optional[str]:\n \"\"\" Ensures a name doesn't contain characters that will break external programs\"\"\"\n if name is None:\n return None\n name = str(name)\n illegal_chars = set(\"!\\\"#$%&()*+,:; \\r\\n\\t=>?@[]^`'{|}/ \")\n for char in set(name).intersection(illegal_chars):\n name = name.replace(char, \"_\")\n return name\n","repo_name":"agolfinos/antismash","sub_path":"antismash/common/secmet/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":42495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"10211563443","text":"import sys\nsys.path.append(\"\") \n\nfrom util.helper import clear, type_writer, colored_text\nfrom caesarcipher_helper import logo, alphabets\n\ndef caesar(message, key, direction):\n result = \"\"\n if direction == \"decode\": key *= -1\n for char in message:\n if char in alphabets:\n pivot = alphabets.index(char) + key\n pivot %= 26\n result += alphabets[pivot]\n else: result += char\n return result\n\nclear()\ntype_writer(colored_text(logo), 0.02)\nflag = \"yes\"\n\nwhile flag == \"yes\":\n direction = input(\"\\nType 'encode' to encode, 'decode' to decode: \").lower()\n\n if direction != \"encode\" and direction != \"decode\":\n print(\"\\nThis is an invalid input. Try again.\")\n continue\n else:\n message = input(\"\\nEnter the message: \")\n key = int(input(\"\\nEnter shift amount: \"))\n print(colored_text(f\"\\n{direction.capitalize()}d message is {caesar(message, key % 26, direction)}\"))\n\n flag = input(\"\\nType 'yes' if you want to go again, else type 'no': \").lower()\n\nclear()\nprint(colored_text(\"\\nGOODBYE\\n\", \"red\"))","repo_name":"ChanduVamsi/100-Days-of-Python","sub_path":"Day8/caesarcipher.py","file_name":"caesarcipher.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12615826193","text":"\"\"\"\nExtra utilities for waffle: most classes are defined in edx_toggles.toggles (https://edx-toggles.readthedocs.io/), but\nwe keep here some extra classes for usage within edx-platform. These classes cover course override use cases.\n\"\"\"\nimport logging\n\nfrom edx_toggles.toggles import WaffleFlag\nfrom opaque_keys.edx.keys import CourseKey\n\nlog = logging.getLogger(__name__)\n\n\nclass CourseWaffleFlag(WaffleFlag):\n \"\"\"\n Represents a single waffle flag that can be forced on/off for a course.\n\n This class should be used instead of WaffleFlag when in the context of a course.\n This class will also respect any org-level overrides, though course-level overrides will take precedence.\n\n Uses a cached waffle namespace.\n\n Usage:\n\n SOME_COURSE_FLAG = CourseWaffleFlag('my_namespace.some_course_feature', __name__, log_prefix='')\n\n And then we can check this flag in code with::\n\n SOME_COURSE_FLAG.is_enabled(course_key)\n\n To configure a course-level override, go to Django Admin \"waffle_utils\" -> \"Waffle flag course overrides\".\n\n Waffle flag: Set this to the flag name (e.g. my_namespace.some_course_feature).\n Course id: Set this to the course id (e.g. 
course-v1:edx+100+Demo)\n Override choice: (Force on/Force off). \"Force on\" will enable the waffle flag for all users in a course,\n overriding any behavior configured on the waffle flag itself. \"Force off\" will disable the waffle flag\n for all users in a course, overriding any behavior configured on the waffle flag itself. Requires\n \"Enabled\" (see below) to apply.\n Enabled: Must be marked as \"enabled\" in order for the override to be applied. These settings can't be\n deleted, so instead, you need to add another disabled override entry to disable the override.\n\n To configure an org-level override, go to Django Admin \"waffle_utils\" -> \"Waffle flag org overrides\".\n\n Waffle flag: Set this to the flag name (e.g. my_namespace.some_course_feature).\n Org name: Set this to the organization name (e.g. edx)\n Override choice: (Force on/Force off). \"Force on\" will enable the waffle flag for all users in an org's courses,\n overriding any behavior configured on the waffle flag itself. \"Force off\" will disable the waffle flag\n for all users in a org's courses, overriding any behavior configured on the waffle flag itself. Requires\n \"Enabled\" (see below) to apply.\n Enabled: Must be marked as \"enabled\" in order for the override to be applied. These settings can't be\n deleted, so instead, you need to add another disabled override entry to disable the override.\n \"\"\"\n def _get_course_override_value(self, course_key):\n \"\"\"\n Check whether the course flag was overriden.\n\n Returns True/False if the flag was forced on or off for the provided course.\n Returns None if the flag was not overridden.\n\n Note: Has side effect of caching the override value.\n\n Arguments:\n course_key (CourseKey): The course to check for override before checking waffle.\n \"\"\"\n # Import is placed here to avoid model import at project startup.\n from .models import WaffleFlagCourseOverrideModel, WaffleFlagOrgOverrideModel\n\n course_cache_key = f\"{self.name}.cwaffle.{str(course_key)}\"\n course_override = self.cached_flags().get(course_cache_key)\n\n if course_override is None:\n course_override = WaffleFlagCourseOverrideModel.override_value(\n self.name, course_key\n )\n self.cached_flags()[course_cache_key] = course_override\n\n if course_override == WaffleFlagCourseOverrideModel.ALL_CHOICES.on:\n return True\n if course_override == WaffleFlagCourseOverrideModel.ALL_CHOICES.off:\n return False\n\n # Since no course-specific override was found, fall back to checking at the org-level.\n if course_key:\n org = course_key.org\n org_cache_key = f\"{self.name}.owaffle.{org}\"\n org_override = self.cached_flags().get(org_cache_key)\n\n if org_override is None:\n org_override = WaffleFlagOrgOverrideModel.override_value(\n self.name, org\n )\n self.cached_flags()[org_cache_key] = org_override\n\n if org_override == WaffleFlagOrgOverrideModel.ALL_CHOICES.on:\n return True\n if org_override == WaffleFlagOrgOverrideModel.ALL_CHOICES.off:\n return False\n\n return None\n\n def is_enabled(self, course_key=None): # pylint: disable=arguments-differ\n \"\"\"\n Returns whether or not the flag is enabled within the context of a given course.\n\n Arguments:\n course_key (Optional[CourseKey]): The course to check for override before\n checking waffle. 
If omitted, check whether the flag is enabled\n outside the context of any course.\n \"\"\"\n if course_key:\n assert isinstance(\n course_key, CourseKey\n ), \"Provided course_key '{}' is not instance of CourseKey.\".format(\n course_key\n )\n is_enabled_for_course = self._get_course_override_value(course_key)\n if is_enabled_for_course is not None:\n return is_enabled_for_course\n return super().is_enabled()\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/waffle_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"6845375540","text":"import random\n\n# zero-pad so the target always has exactly 4 digits\ntarget = str(random.randint(0,9999)).zfill(4)\nguess = input(\"your guess: \")\nguesses = 0\n\nwhile target != guess:\n guesses += 1\n # bulls: correct digit in the correct place; cows: correct digit in the wrong place\n bulls = sum(1 for i, digit in enumerate(target) if i < len(guess) and guess[i] == digit) # use enumerate instead of range(len())\n cows = sum(min(target.count(d), guess.count(d)) for d in set(guess)) - bulls\n print(f\"{cows} cows, {bulls} bulls\")\n guess = input(\"your guess: \")\n\nprint(f\"moooooooooooooooooooooooooooooo (in {guesses + 1} guesses)\")\n\n\n","repo_name":"ralfseduards/vscode-main","sub_path":"ex/cows.py","file_name":"cows.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30498494341","text":"import torch\nimport numpy as np\nimport torch.optim as optim\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom transformers import AutoTokenizer,get_linear_schedule_with_warmup\nfrom torch.utils.data import Dataset,DataLoader,random_split\nfrom sklearn.metrics import classification_report, f1_score\n\nclass Data(Dataset):\n \"\"\"\n Dataset for parsing sentences and their label sequences\n \"\"\"\n def __init__(self,hparams):\n # self.seq_length = hparams['seq_length']\n self.entity_list = [\n 'B-award',\n 'I-award',\n 'B-conference',\n 'I-conference',\n 'B-department',\n 'I-department',\n 'B-location',\n 'I-location',\n 'B-major',\n 'I-major',\n 'B-name',\n 'I-name',\n 'B-organization',\n 'I-organization',\n 'B-position',\n 'I-position',\n 'B-scholarship',\n 'I-scholarship',\n 'O'\n ]\n \n tag2idx = dict()\n tag2idx['[START]'] = len(tag2idx)\n tag2idx['[END]'] = len(tag2idx)\n tag2idx['[PAD]'] = len(tag2idx)\n\n for item in self.entity_list:\n tag2idx[item] = len(tag2idx)\n\n self.tag2idx = tag2idx\n self.path = hparams['path']\n\n self.bert = False\n if 'bert' in hparams:\n self.bert = True\n self.tokenizer = AutoTokenizer.from_pretrained(hparams['bert'])\n \n self.max_length = hparams['seq_length']\n\n self._parse_file()\n\n def _parse_file(self):\n \"\"\" parse the labeled training file to collect sentences and corresponding labels, and assign them \n as the attributes of self\n\n Args:\n path: string of target file\n \"\"\"\n f = open(self.path,'r',encoding='utf-8')\n sentences = []\n labels = []\n\n sentence = []\n label = []\n\n vocab = {'[PAD]':0}\n # max_length = 0\n\n for i,line in enumerate(f):\n if line == '\\n':\n # append the period at the end of the sentence for better learning the partition\n # sentence.append('[SEP]')\n \n # append the extra labels\n # label.append('O')\n\n sentences.append(sentence)\n labels.append(label)\n\n # if len(sentence) > max_length:\n # max_length = len(sentence)\n \n sentence = []\n label = []\n continue\n \n pair = line.strip().split('\\t')\n sentence.append(pair[0])\n \n if pair[0] not in vocab:\n vocab[pair[0]] = len(vocab)\n\n
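# expected file layout (illustrative): one tab-separated token/tag pair per line, with a blank line between sentences; a line carrying no tag is treated as unlabeled and tagged 'O' below\n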
if len(pair) == 1:\n label.append('O')\n elif len(pair) == 2:\n label.append(pair[1])\n else:\n print(\"Error when spliting a line {}, which is {}\".format(i, line))\n raise ValueError\n \n if sentence:\n # append the period at the end of the sentence for better learning the partition\n # sentence.append('。')\n \n # append the extra labels\n # label.append('O')\n # label.insert(0,'[START]')\n # label.append('[END]')\n\n sentences.append(sentence)\n labels.append(label)\n\n # if len(sentence) > max_length:\n # max_length = len(sentence)\n \n sentence = []\n label = []\n\n self.sentences = sentences\n self.labels = labels\n self.vocab = vocab\n # [SEP] and [CLS]\n # self.max_length = max_length + 2\n\n if self.bert:\n for label in self.labels:\n label.insert(0,'O')\n label.append('O')\n \n def __len__(self):\n return len(self.sentences)\n \n def __getitem__(self,idx):\n back_dict = {}\n sentence = self.sentences[idx]\n \n if self.bert:\n sentence_ = ''.join(sentence)\n encoded_dict = self.tokenizer.encode_plus(sentence_, pad_to_max_length=True, truncation=True, max_length=self.max_length, return_tensors='pt')\n tokens = encoded_dict['input_ids'].squeeze()\n back_dict['attn_mask'] = encoded_dict['attention_mask'].squeeze()\n\n else:\n sentence_ = ''.join(sentence)\n tokens = [self.vocab[word] for word in sentence]\n tokens = tokens[:self.max_length] + [0] * (self.max_length - len(tokens))\n tokens = np.asarray(tokens)\n\n label = [self.tag2idx[i] for i in self.labels[idx]]\n label = label[:self.max_length] + [self.tag2idx['[PAD]']] * (self.max_length - len(label))\n \n back_dict['sentence'] = sentence_\n back_dict['token'] = tokens\n back_dict['label'] = np.asarray(label)\n\n return back_dict\n\ndef my_collate(data):\n \"\"\" \n costomized collate_fn, converting data to list rather than tensor\n \"\"\"\n excluded = ['sentence']\n result = defaultdict(list)\n for d in data:\n for k,v in d.items():\n result[k].append(v)\n for k,v in result.items():\n if k not in excluded:\n result[k] = torch.tensor(v)\n else:\n continue\n return dict(result)\n\ndef prepare(hparams, split=0.9):\n \"\"\" prepare dataset and dataloader for training\n\n Args:\n hparams: dict of hyper parameters\n split: the portion of training set\n \n Returns:\n tag2idx: the map from the name of the tag to the index of it\n loader_train: dataloader for training, without multi-process by default\n \"\"\"\n dataset = Data(hparams)\n\n train_size = int(split * len(dataset))\n val_size = len(dataset) - train_size\n\n dataset_train, dataset_val = random_split(dataset,[train_size, val_size])\n \n loader_train = DataLoader(dataset_train, batch_size=hparams['batch_size'], num_workers=8, drop_last=False, pin_memory=True, shuffle=True)\n loader_val = DataLoader(dataset_val, batch_size=hparams['batch_size'], num_workers=8, drop_last=False, pin_memory=True)\n if dataset.bert:\n attr_dict = {\n 'tag2idx': dataset.tag2idx,\n 'tokenizer': dataset.tokenizer\n }\n else:\n attr_dict = {\n 'tag2idx': dataset.tag2idx,\n 'tokenizer': dataset.vocab\n }\n\n return attr_dict, [loader_train,loader_val]\n\ndef evaluate(model, loader, prt=True):\n \"\"\" evaluate the model by accuracy, recall and f1-score\n\n Args:\n model\n loader: DataLoader\n \n Returns:\n report: accuracy, recall, f1-score printed\n \"\"\"\n with torch.no_grad():\n preds = []\n labels = []\n for i,x in enumerate(loader): \n preds.extend(model(x).flatten().tolist())\n labels.extend(x['label'].flatten().tolist())\n if prt:\n print(classification_report(labels, preds))\n \n return {\n 
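# macro-F1 averages the per-class F1 scores equally, while weighted-F1 weights them by class support\n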
'macro_f1': round(f1_score(y_true=labels, y_pred=preds, average='macro'),4),\n 'weighted_f1': round(f1_score(y_true=labels, y_pred=preds, average='weighted'),4),\n # 'micro_f1': round(f1_score(y_true=labels, y_pred=preds, average='micro'),4),\n }\n\n\ndef train(hparams, model, loaders, lr=1e-3,schedule=False):\n \"\"\" train the model\n\n Args:\n model\n loader: DataLoader\n \"\"\"\n\n optimizer = optim.AdamW(model.parameters(),lr=lr)\n\n if schedule:\n total_steps = len(loaders[0]) * hparams['epochs']\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps)\n\n for epoch in range(hparams['epochs']):\n tqdm_ = tqdm(enumerate(loaders[0]))\n total_loss = 0\n\n for step,x in tqdm_:\n loss = model.fit(x)\n loss.backward()\n\n # prevent gradient explosion\n # torch.nn.utils.clip_grad_norm_(bert_model.parameters(), 1.0)\n\n optimizer.step()\n optimizer.zero_grad()\n \n if schedule:\n scheduler.step()\n\n total_loss += loss.item()\n tqdm_.set_description(\"epoch {:d} , step {:d} , loss: {:.4f}\".format(epoch+1, step, total_loss/(step+1)))\n \n # print the performance on validation set every epoch\n print(evaluate(model, loaders[1], prt=False))\n \n return model\n\ndef predict(sentence, model, tokenizer, max_length=None):\n \"\"\"\n convert the input sentence to its word ids, then feed it into the model\n \n Args: \n sentence: list of regular string\n model: NER model\n tokenizer: vocab or bert-tokenizer\n max_length: all sentence will be padded/truncated to max_length\n \n Returns:\n result: tagging sequence\n \"\"\"\n idx2tag = {v:k for k,v in model.tag2idx.items()}\n if not max_length:\n max_length = model.seq_length\n\n if hasattr(model, 'bert'):\n sentence = [tokenizer.encode_plus(sent, pad_to_max_length=True, truncation=True, max_length=max_length, return_tensors='pt') for sent in sentence]\n\n token = torch.cat([sent['input_ids'] for sent in sentence], dim=0)\n attn_masks = torch.cat([sent['attention_mask'] for sent in sentence], dim=0)\n\n tag_seq = model({'token':token, 'attn_mask':attn_masks}).tolist()\n tag_seq = [[idx2tag[j] for j in i] for i in tag_seq]\n\n original = [tokenizer.convert_ids_to_tokens(toke) for toke in token]\n\n else:\n original = [[word for word in sent] for sent in sentence]\n\n sentence = [[tokenizer[i] for i in sent] for sent in sentence]\n sentence = [sent + [0] * (max_length - len(sent)) for sent in sentence]\n sentence = torch.tensor(sentence, device=model.device, dtype=torch.long)\n tag_seq = model({'token': sentence})\n tag_seq = [[idx2tag[j] for j in i] for i in tag_seq.tolist()]\n \n for sent,tags in zip(original, tag_seq):\n print('************************')\n for word,tag in zip(sent,tags):\n print(word, tag)","repo_name":"namespace-Pt/NER","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34825337914","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nSCORE_THRESH = 0.3\nSTRIDE_SCALE = 8\nIOU_THRESH = 0.6\n\nclass Integral(nn.Module):\n \"\"\"A fixed layer for calculating integral result from distribution.\n\n This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,\n P(y_i) denotes the softmax vector that represents the discrete distribution\n y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}\n\n Args:\n reg_max (int): The maximal value of the discrete set. Default: 16. 
You\n may want to reset it according to your new dataset or related\n settings.\n \"\"\"\n\n def __init__(self, reg_max=16):\n super(Integral, self).__init__()\n self.reg_max = reg_max\n self.register_buffer('project',\n torch.linspace(0, self.reg_max, self.reg_max + 1))\n\n def forward(self, x):\n \"\"\"Forward feature from the regression head to get integral result of\n bounding box location.\n\n Args:\n x (Tensor): Features of the regression head, shape (N, 4*(n+1)),\n n is self.reg_max.\n\n Returns:\n x (Tensor): Integral result of box locations, i.e., distance\n offsets from the box center in four directions, shape (N, 4).\n \"\"\"\n x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)\n x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)\n return x\n\n\n\ndef IouCal(Box1, Box2):\n inner_x1 = torch.max(Box1[0], Box2[0])\n inner_y1 = torch.max(Box1[1], Box2[1])\n inner_x2 = torch.min(Box1[2], Box2[2])\n inner_y2 = torch.min(Box1[3], Box2[3])\n # clamp at zero so non-overlapping boxes get an intersection of 0 instead of a spurious positive area\n area_inner = torch.clamp(inner_x2 - inner_x1, min=0.) * torch.clamp(inner_y2 - inner_y1, min=0.)\n area = (Box2[2] - Box2[0]) * (Box2[3] - Box2[1]) + \\\n (Box1[2] - Box1[0]) * (Box1[3] - Box1[1]) - \\\n area_inner\n return area_inner / area\n\ndef nms(Bboxes):\n Bboxes = sorted(Bboxes, key=lambda x:x[4], reverse=True)\n record_dict = set()\n res = []\n for i in range(len(Bboxes)):\n if i not in record_dict:\n record_dict.add(i)\n res.append(Bboxes[i])\n else:\n continue\n for j in range(i + 1, len(Bboxes)):\n Iou = IouCal(Bboxes[i], Bboxes[j])\n if Iou > IOU_THRESH:\n record_dict.add(j)\n continue\n return res\n\n\ndef gfl_post_process(output, extra_info):\n integral = Integral(16)\n ml_scores, ml_bboxes = output\n scale_factor = extra_info[\"scale_factor\"]\n levels = 5\n total_bboxes = []\n\n for level in range(levels):\n stride = 2**(level)*8\n '''by default the level outputs are ordered from small stride to large stride'''\n feat_h, feat_w = ml_scores[level].shape[2:]\n scores = ml_scores[level].permute(0, 2, 3, 1).view(feat_h*feat_w, 1).sigmoid()\n bboxes = integral(ml_bboxes[level].permute(0, 2, 3, 1))*stride\n\n for i in range(len(scores)):\n if scores[i] > SCORE_THRESH:\n x = i % int(feat_w) * stride\n y = i // int(feat_w) * stride\n x1 = x - bboxes[i][0]\n y1 = y - bboxes[i][1]\n x2 = x + bboxes[i][2]\n y2 = y + bboxes[i][3]\n score_loc = scores[i]\n box = torch.stack([x1, y1, x2, y2], dim=0)/torch.tensor(scale_factor)\n total_bboxes.append(torch.cat([box, score_loc], dim=0))\n nmsBoxes = nms(total_bboxes)\n return nmsBoxes\n","repo_name":"HAOCHENYE/yehc_mmdet","sub_path":"tools/grad_cam_tools/gfl_post_process.py","file_name":"gfl_post_process.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25107925423","text":"\"\"\"\nMain driver file.\nHandling user input.\nDisplaying current GameState object.\n\"\"\"\nimport pygame as p\nimport ChessEngine, ChessAI\nimport sys\nfrom multiprocessing import Process, Queue\n\nBOARD_WIDTH = BOARD_HEIGHT = 512\nMOVE_LOG_PANEL_WIDTH = 250\nMOVE_LOG_PANEL_HEIGHT = BOARD_HEIGHT\nDIMENSION = 8\nSQUARE_SIZE = BOARD_HEIGHT // DIMENSION\nMAX_FPS = 15\nIMAGES = {}\n\n\ndef loadImages():\n \"\"\"\n Initialize a global directory of images.\n This will be called exactly once in the main.\n \"\"\"\n pieces = ['wp', 'wR', 'wN', 'wB', 'wK', 'wQ', 'bp', 'bR', 'bN', 'bB', 'bK', 'bQ']\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef main():\n \"\"\"\n The main driver for our 
code.\n This will handle user input and updating the graphics.\n \"\"\"\n p.init()\n screen = p.display.set_mode((BOARD_WIDTH + MOVE_LOG_PANEL_WIDTH, BOARD_HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n game_state = ChessEngine.GameState()\n valid_moves = game_state.getValidMoves()\n move_made = False # flag variable for when a move is made\n animate = False # flag variable for when we should animate a move\n loadImages() # do this only once before while loop\n running = True\n square_selected = () # no square is selected initially, this will keep track of the last click of the user (tuple(row,col))\n player_clicks = [] # this will keep track of player clicks (two tuples)\n game_over = False\n ai_thinking = False\n move_undone = False\n move_finder_process = None\n move_log_font = p.font.SysFont(\"Arial\", 14, False, False)\n player_one = True # if a human is playing white, then this will be True, else False\n player_two = False # if a human is playing black, then this will be True, else False\n\n while running:\n human_turn = (game_state.white_to_move and player_one) or (not game_state.white_to_move and player_two)\n for e in p.event.get():\n if e.type == p.QUIT:\n p.quit()\n sys.exit()\n # mouse handler\n elif e.type == p.MOUSEBUTTONDOWN:\n if not game_over:\n location = p.mouse.get_pos() # (x, y) location of the mouse\n col = location[0] // SQUARE_SIZE\n row = location[1] // SQUARE_SIZE\n if square_selected == (row, col) or col >= 8: # user clicked the same square twice or clicked the move log panel\n square_selected = () # deselect\n player_clicks = [] # clear clicks\n else:\n square_selected = (row, col)\n player_clicks.append(square_selected) # append for both 1st and 2nd click\n if len(player_clicks) == 2 and human_turn: # after 2nd click\n move = ChessEngine.Move(player_clicks[0], player_clicks[1], game_state.board)\n for i in range(len(valid_moves)):\n if move == valid_moves[i]:\n game_state.makeMove(valid_moves[i])\n move_made = True\n animate = True\n square_selected = () # reset user clicks\n player_clicks = []\n if not move_made:\n player_clicks = [square_selected]\n\n # key handler\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z: # undo when 'z' is pressed\n game_state.undoMove()\n move_made = True\n animate = False\n game_over = False\n if ai_thinking:\n move_finder_process.terminate()\n ai_thinking = False\n move_undone = True\n if e.key == p.K_r: # reset the game when 'r' is pressed\n game_state = ChessEngine.GameState()\n valid_moves = game_state.getValidMoves()\n square_selected = ()\n player_clicks = []\n move_made = False\n animate = False\n game_over = False\n if ai_thinking:\n move_finder_process.terminate()\n ai_thinking = False\n move_undone = True\n\n # AI move finder\n if not game_over and not human_turn and not move_undone:\n if not ai_thinking:\n ai_thinking = True\n return_queue = Queue() # used to pass data between processes\n move_finder_process = Process(target=ChessAI.findBestMove, args=(game_state, valid_moves, return_queue))\n move_finder_process.start()\n\n if not move_finder_process.is_alive():\n ai_move = return_queue.get()\n if ai_move is None:\n ai_move = ChessAI.findRandomMove(valid_moves)\n game_state.makeMove(ai_move)\n move_made = True\n animate = True\n ai_thinking = False\n\n if move_made:\n if animate:\n animateMove(game_state.move_log[-1], screen, game_state.board, clock)\n valid_moves = game_state.getValidMoves()\n move_made = False\n animate = False\n move_undone = False\n\n drawGameState(screen, game_state, valid_moves, square_selected)\n\n if not 
game_over:\n drawMoveLog(screen, game_state, move_log_font)\n\n if game_state.checkmate:\n game_over = True\n if game_state.white_to_move:\n drawEndGameText(screen, \"Black wins by checkmate\")\n else:\n drawEndGameText(screen, \"White wins by checkmate\")\n\n elif game_state.stalemate:\n game_over = True\n drawEndGameText(screen, \"Stalemate\")\n\n clock.tick(MAX_FPS)\n p.display.flip()\n\n\ndef drawGameState(screen, game_state, valid_moves, square_selected):\n \"\"\"\n Responsible for all the graphics within current game state.\n \"\"\"\n drawBoard(screen) # draw squares on the board\n highlightSquares(screen, game_state, valid_moves, square_selected)\n drawPieces(screen, game_state.board) # draw pieces on top of those squares\n\n\ndef drawBoard(screen):\n \"\"\"\n Draw the squares on the board.\n The top left square is always light.\n \"\"\"\n global colors\n colors = [p.Color(\"white\"), p.Color(\"gray\")]\n for row in range(DIMENSION):\n for column in range(DIMENSION):\n color = colors[((row + column) % 2)]\n p.draw.rect(screen, color, p.Rect(column * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef highlightSquares(screen, game_state, valid_moves, square_selected):\n \"\"\"\n Highlight square selected and moves for piece selected.\n \"\"\"\n if (len(game_state.move_log)) > 0:\n last_move = game_state.move_log[-1]\n s = p.Surface((SQUARE_SIZE, SQUARE_SIZE))\n s.set_alpha(100)\n s.fill(p.Color('green'))\n screen.blit(s, (last_move.end_col * SQUARE_SIZE, last_move.end_row * SQUARE_SIZE))\n if square_selected != ():\n row, col = square_selected\n if game_state.board[row][col][0] == (\n 'w' if game_state.white_to_move else 'b'): # square_selected is a piece that can be moved\n # highlight selected square\n s = p.Surface((SQUARE_SIZE, SQUARE_SIZE))\n s.set_alpha(100) # transparency value 0 -> transparent, 255 -> opaque\n s.fill(p.Color('blue'))\n screen.blit(s, (col * SQUARE_SIZE, row * SQUARE_SIZE))\n # highlight moves from that square\n s.fill(p.Color('yellow'))\n for move in valid_moves:\n if move.start_row == row and move.start_col == col:\n screen.blit(s, (move.end_col * SQUARE_SIZE, move.end_row * SQUARE_SIZE))\n\n\ndef drawPieces(screen, board):\n \"\"\"\n Draw the pieces on the board using the current game_state.board\n \"\"\"\n for row in range(DIMENSION):\n for column in range(DIMENSION):\n piece = board[row][column]\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(column * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef drawMoveLog(screen, game_state, font):\n \"\"\"\n Draws the move log.\n\n \"\"\"\n move_log_rect = p.Rect(BOARD_WIDTH, 0, MOVE_LOG_PANEL_WIDTH, MOVE_LOG_PANEL_HEIGHT)\n p.draw.rect(screen, p.Color('black'), move_log_rect)\n move_log = game_state.move_log\n move_texts = []\n for i in range(0, len(move_log), 2):\n move_string = str(i // 2 + 1) + '. 
' + str(move_log[i]) + \" \"\n if i + 1 < len(move_log):\n move_string += str(move_log[i + 1]) + \" \"\n move_texts.append(move_string)\n\n moves_per_row = 3\n padding = 5\n line_spacing = 2\n text_y = padding\n for i in range(0, len(move_texts), moves_per_row):\n text = \"\"\n for j in range(moves_per_row):\n if i + j < len(move_texts):\n text += move_texts[i + j]\n\n text_object = font.render(text, True, p.Color('white'))\n text_location = move_log_rect.move(padding, text_y)\n screen.blit(text_object, text_location)\n text_y += text_object.get_height() + line_spacing\n\n\ndef drawEndGameText(screen, text):\n font = p.font.SysFont(\"Helvetica\", 32, True, False)\n text_object = font.render(text, False, p.Color(\"gray\"))\n text_location = p.Rect(0, 0, BOARD_WIDTH, BOARD_HEIGHT).move(BOARD_WIDTH / 2 - text_object.get_width() / 2,\n BOARD_HEIGHT / 2 - text_object.get_height() / 2)\n screen.blit(text_object, text_location)\n text_object = font.render(text, False, p.Color('black'))\n screen.blit(text_object, text_location.move(2, 2))\n\n\ndef animateMove(move, screen, board, clock):\n \"\"\"\n Animating a move\n \"\"\"\n global colors\n d_row = move.end_row - move.start_row\n d_col = move.end_col - move.start_col\n frames_per_square = 10 # frames to move one square\n frame_count = (abs(d_row) + abs(d_col)) * frames_per_square\n for frame in range(frame_count + 1):\n row, col = (move.start_row + d_row * frame / frame_count, move.start_col + d_col * frame / frame_count)\n drawBoard(screen)\n drawPieces(screen, board)\n # erase the piece moved from its ending square\n color = colors[(move.end_row + move.end_col) % 2]\n end_square = p.Rect(move.end_col * SQUARE_SIZE, move.end_row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE)\n p.draw.rect(screen, color, end_square)\n # draw captured piece onto rectangle\n if move.piece_captured != '--':\n if move.is_enpassant_move:\n enpassant_row = move.end_row + 1 if move.piece_captured[0] == 'b' else move.end_row - 1\n end_square = p.Rect(move.end_col * SQUARE_SIZE, enpassant_row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE)\n screen.blit(IMAGES[move.piece_captured], end_square)\n # draw moving piece\n screen.blit(IMAGES[move.piece_moved], p.Rect(col * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n p.display.flip()\n clock.tick(60)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mikolaj-skrzypczak/chess-engine","sub_path":"chess/ChessMain.py","file_name":"ChessMain.py","file_ext":"py","file_size_in_byte":11457,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"} +{"seq_id":"30969849684","text":"# -*- coding: utf-8 -*-\nfrom django.test import TestCase\nimport json\nfrom woistbier_rest.models import Kiosk, BeerPrice, Beer\n\nprefix = '/bier/rest/'\n\n\n#create a dummy kiosk\ndef create_dummy_kiosk(street='musterstraße', number=123, city='Dortmund' ):\n kiosk = Kiosk()\n kiosk.street = street\n kiosk.number = number\n kiosk.city = city\n kiosk.save()\n return kiosk\n\n\ndef create_dummy_beer(name='Geiles Bier', brew='Pils', location='Dortmund'):\n beer = Beer(name=name, brew=brew, location=location)\n beer.save()\n return beer\n\n\n'''\nTests for the kioskmodel. \n'''\n\n\nclass KioskModelTest(TestCase):\n\n #suppose the admin want sto change the addres of the kiosk. 
The name should change as well\n def test_kiosk_save(self):\n #create a new kiosk isntance and save it to the db\n #kiosk = self.post_kiosk_and_get_resulting_kiosk('TestStraße', 12)\n kiosk = create_dummy_kiosk(street=u'TestStraße', number=12)\n self.assertEqual(unicode(kiosk.street).encode('utf-8'), 'TestStraße')\n self.assertEqual(kiosk.number, 12)\n self.assertEqual(unicode(kiosk.name).encode('utf-8'), 'TestStraße' + ' ' + str(12))\n\n #now change street name and number\n kiosk.street = 'AndereStraße'\n kiosk.number = 32\n\n self.assertEqual(kiosk.street, 'AndereStraße')\n self.assertEqual(kiosk.number, 32)\n #the kiosk.name will change on calling the save method\n self.assertNotEqual(unicode(kiosk.name).encode('utf-8'), 'AndereStraße' + str(32))\n \n kiosk.save()\n\n self.assertEqual(kiosk.name, 'AndereStraße' + ' ' + str(32))\n\n\nclass BeerPriceTest(TestCase):\n\n def test_save_beerprice(self):\n #create a dummy object\n kiosk = create_dummy_kiosk()\n beer = create_dummy_beer()\n\n beer_price = BeerPrice()\n beer_price.beer = beer\n beer_price.price = 120\n beer_price.size = 0.5\n beer_price.kiosk = kiosk\n beer_price.save()\n\n self.assertEquals(beer_price.score, beer_price.price/beer_price.size,\n 'Score not correctly computed. Its '+str(beer_price.score)\n + ' but it should be ' + str(beer_price.price/beer_price.size))\n\n","repo_name":"WoIstBier/bier-django","sub_path":"woistbier_rest/tests/model_tests.py","file_name":"model_tests.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31184199479","text":"from flask import Blueprint, request\nfrom classes.jwtManager import decodeJWT\nimport os\nfrom dotenv import load_dotenv, dotenv_values\n\nhome_routes = Blueprint('home_BluePrint', __name__, template_folder='templates')\n\n#Rota para usar a aplicação (no caso uma rota de teste) - primeira rota da aplicação\n@home_routes.route(\"/index\")\ndef index():\n token = str(request.headers['Authorization'])\n token = token.split(' ')[1]\n \n if token:\n retorno = decodeJWT(token)\n if retorno == None:\n return \"Token Inválido ou Expirado, fazer login novamente!\", 401\n else:\n estudio = os.getenv(\"nomeApp\")\n return f\"Logado na API REST do app : {estudio} ; rota /index\", 200\n else:\n return \"Token não fornecido, requisição encerrada\", 401\n","repo_name":"laotsetung/API_REST-JWT_AUTH-Flask","sub_path":"blueprints/home/home_bp.py","file_name":"home_bp.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29047158647","text":"# -*- coding: utf-8 -*-\nimport pytest\nimport datetime\n\nfrom billing.contract_iface.cmeta.helpers import attrdict, attribute\nfrom billing.contract_iface.cmeta.helpers import CollateralTypes as CollateralTypesBase, CollateralAttrs\nfrom billing.contract_iface.contract_meta import collateral_types\nfrom billing.contract_iface.contract_json import JSONContract\n\nfrom balance.mapper import Service, get_contract_json\n\nfrom tests import object_builder as ob\n\nACQUIRING = 'ACQUIRING'\nAFISHA = 'AFISHA'\nDISTRIBUTION = 'DISTRIBUTION'\nGENERAL = 'GENERAL'\nGEOCONTEXT = 'GEOCONTEXT'\nPARTNERS = 'PARTNERS'\nPREFERRED_DEAL = 'PREFERRED_DEAL'\nSPENDABLE = 'SPENDABLE'\n\nNOW = datetime.datetime.now()\n\n\n@pytest.fixture(name='person_category')\ndef create_person_category(session, country=None, **kwargs):\n return ob.PersonCategoryBuilder(country=country or 
create_country(session), **kwargs).build(session).obj\n\n\n@pytest.fixture(name='country')\ndef create_country(session):\n return ob.CountryBuilder().build(session).obj\n\n\n@pytest.fixture(name='person')\ndef create_person(session, client, type=None):\n if not type:\n type = create_person_category(session).category\n return ob.PersonBuilder(client=client, type=type).build(session).obj\n\n\n@pytest.fixture(name='client')\ndef create_client(session, **kwargs):\n return ob.ClientBuilder(**kwargs).build(session).obj\n\n\ndef create_contract(session, **kwargs):\n contract_params = {'is_signed': None,\n 'finish_dt': None,\n 'dt': NOW}\n contract_params.update(kwargs)\n return ob.ContractBuilder(**contract_params).build(session, ).obj\n\n\ndef create_jc(contract):\n json_body = get_contract_json(contract)\n return JSONContract.loads(json_body)\n\n\ndef set_same_external_id(contracts):\n external_id = contracts[0].create_new_eid()\n for contract in contracts:\n contract.external_id = external_id\n\n\ndef create_request(session, client=None, orders=None, quantity=1, **kwargs):\n if not client:\n client = create_client(session)\n if not orders:\n orders = [create_order(session, client=client)]\n\n return ob.RequestBuilder(\n basket=ob.BasketBuilder(\n rows=[ob.BasketItemBuilder(order=order, quantity=quantity) for order in orders],\n client=client), **kwargs).build(session).obj\n\n\ndef set_unique_external_id(contract):\n contract.external_id = contract.create_new_eid()\n\n\ndef create_contract_type(session, descr=None):\n test_contract_type = 'TEST_CONTRACT_TYPE' + str(ob.get_big_number())\n session.execute('''INSERT INTO T_CONTRACT_TYPES VALUES (:contract_type, :descr)''',\n {'contract_type': test_contract_type, 'descr': descr})\n return test_contract_type\n\n\ndef create_attr(contract_type, name, persistattr=0, pytype='int'):\n contract_attrs = attrdict('code')\n contract_attrs[name.upper()] = attribute(contract_type=contract_type, pytype=pytype, htmltype='refselect',\n source='firms', caption=u'Тестовый атрибут', headattr=1, position=26,\n grp=1, persistattr=persistattr)\n return contract_attrs\n\n\ndef create_collateral_type(contract_type):\n collateral_types[contract_type] = attrdict('id')\n collateral_types[contract_type][1] = CollateralTypesBase(caption=u'Тестовое допсоглашение',\n contract_type=contract_type,\n attributes=[CollateralAttrs(attribute_code='TEST_ATTR')])\n\n\ndef create_order(session, client, service=None, **kwargs):\n if service is None:\n service = ob.Getter(Service, 7).build(session).obj\n product = ob.ProductBuilder().build(session).obj\n return ob.OrderBuilder(client=client, service=service,\n product=product, **kwargs).build(session).obj\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/balance_tests/contract/contract_common.py","file_name":"contract_common.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1114771614","text":"import socket\nimport time\nimport io\nHOST = '104.154.120.223'\nPORT = 8085\n\n\ndef Solver(serverIn, key):\n if('enter the key' in serverIn):\n return(key, key)\n else:\n temp = serverIn[(serverIn.find('cipher:')+ 8):serverIn.find('\\n')]\n temp = temp.split(' ')\n outlist = ''\n for item in temp:\n if(len(item) == 8):\n outlist += item[1]\n elif(len(item) == 11):\n if(ord(item[-1]) > 64 and ord(item[-1]) < 91):\n outlist += item[-4]\n else:\n outlist += item[3]\n else:\n outlist += item[-1]\n print(outlist)\n return('2', 
outlist)\n\n\n\ndef tcp_client():\n client = socket.socket( socket.AF_INET, socket.SOCK_STREAM)\n client.connect(( HOST, PORT ))\n response = client.recv(4096)\n #print(response + '\\n\\n\\n')\n key = ''\n temp = 0\n with open('chaosLog.txt', 'a') as loggg:\n while 1:\n #time.sleep(.5)\n response = client.recv(4096)\n texty, key = Solver(response, key)\n loggg.write('The:\\n' + response+ '\\n\\n\\n'+key+'\\n\\n\\n')\n time.sleep(2)\n print('\\nThe length:\\n'+ str(len(texty)) +'\\n\\n\\n')\n client.send(texty)\n\n#38\n\nif __name__ == '__main__':\n tcp_client()","repo_name":"indirektly/CTF_Write_Ups","sub_path":"ISITDTU_2019/chaos/chaos.py","file_name":"chaos.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4970792468","text":"import tensorflow as tf \nimport numpy as np \nimport random\nfrom collections import deque \nimport cv2\n\nimport time\n\n# Hyper Parameters As Specified in Paper:\nBATCH_SIZE = 32\nREPLAY_MEMORY = 1000000\nAGENT_HISTORY = 4 # number of frames in the past our agent sees\nTARGET_UPDATE_FREQ = 10000\nGAMMA = 0.99\nACTION_REPEAT = 4 # for how many frames do we repeat actions\nUPDATE_FREQ = 4\n\nLEARNING_RATE = 0.00025\nGRADIENT_MOMENTUM = 0.95\nFINAL_EPSILON = 0.1 # final value of epsilon\nINITIAL_EPSILON = 1.0 # starting value of epsilon\nEXPLORE_FRAMES = 1000000. # frames over which to anneal epsilon\nOBSERVE = 50000. # timesteps to observe before training through experience replay\n#OBSERVE = 1000\nNO_OP_MAX = 30 # number of max frames before we start doing actions\nATARI_NUM = 16 # 16 actions: up, down, left, right and the diagonals, times two because of the fire button\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (84, 110)), cv2.COLOR_BGR2GRAY)\n observation = observation[26:110,:]\n ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)\n return np.reshape(observation,(84,84,1))\n\n# epsilon determines our exploration trade-off\nclass DQN:\n def __init__(self, init_epsilon=INITIAL_EPSILON, actions=ATARI_NUM, game=\"BREAKOUT\"):\n self.memory = deque()\n self.timeStep = 0\n self.epsilon = init_epsilon\n self.actions = actions # atari defaults\n self.prevAction = 0\n self.rewards = 0\n\n if game == \"BREAKOUT\":\n self.logsdir = './logs/train/1'\n self.modeldir= './model/breakout/'\n elif game == \"SPACE\":\n self.logsdir = './logs/train/2'\n self.modeldir = './model/space/'\n elif game == \"PONG\":\n self.logsdir = './logs/train/3'\n self.modeldir = './model/pong/'\n elif game == \"PACMAN\":\n self.logsdir = './logs/train/4'\n self.modeldir = './model/pacman/'\n\n # notice that there really isn't any dropout\n with tf.name_scope('normal'):\n self.input = tf.placeholder(\"float\", [None, 84, 84, AGENT_HISTORY], name = \"input\") # our image must be converted to greyscale first\n\n with tf.name_scope('conv1'):\n self.weight1_conv = tf.Variable(tf.truncated_normal(shape=[8,8,AGENT_HISTORY, 32], stddev=0.01), name=\"weight1_conv\")\n self.bias1_conv = tf.Variable(tf.constant(0.01, shape=[32]), name=\"bias1_conv\")\n self.conv1 = tf.nn.relu(tf.nn.conv2d(self.input, self.weight1_conv, strides=[1,4,4,1], padding=\"VALID\") + self.bias1_conv)\n with tf.name_scope('conv2'):\n self.weight2_conv = tf.Variable(tf.truncated_normal(shape=[4,4,32,64], stddev=0.01), name=\"weight2_conv\")\n self.bias2_conv = tf.Variable(tf.constant(0.01, shape=[64]), name=\"bias2_conv\")\n self.conv2 = tf.nn.relu(tf.nn.conv2d(self.conv1, self.weight2_conv, 
strides=[1,2,2,1], padding=\"VALID\") + self.bias2_conv, name=\"conv2\")\n with tf.name_scope('conv3'):\n self.weight3_conv = tf.Variable(tf.truncated_normal(shape=[3,3,64,64], stddev=0.01), name=\"weight3_conv\")\n self.bias3_conv = tf.Variable(tf.constant(0.01, shape=[64]), name=\"bias3_conv\")\n self.conv3 = tf.nn.relu(tf.nn.conv2d(self.conv2, self.weight3_conv, strides=[1,1,1,1], padding=\"VALID\") + self.bias3_conv, name=\"conv3\")\n \n conv3_flat = tf.reshape(self.conv3, [-1, 7*7*64], name='flatten_conv3')\n\n with tf.name_scope('fc4'):\n self.weight4_fc = tf.Variable(tf.truncated_normal(shape=[3136, 512], stddev=0.01), name=\"weight4_fc\")\n self.bias4_fc = tf.Variable(tf.constant(0.01, shape=[512]), name=\"bias4_fc\")\n self.fc4 = tf.nn.relu(tf.matmul(conv3_flat, self.weight4_fc) + self.bias4_fc)\n\n with tf.name_scope('fc5'):\n self.weight5_fc = tf.Variable(tf.truncated_normal(shape=[512, ATARI_NUM], stddev=0.01), name=\"weight5_fc\")\n self.bias5_fc = tf.Variable(tf.constant(0.01, shape=[ATARI_NUM]), name=\"bias5_fc\")\n\n with tf.name_scope('output'): \n self.QValue = (tf.matmul(self.fc4, self.weight5_fc) + self.bias5_fc)\n\n with tf.name_scope('target'):\n self.inputT = tf.placeholder(\"float\", [None, 84, 84, AGENT_HISTORY], name = \"inputT\") # our image must be converted to greyscale first\n\n with tf.name_scope('conv1T'):\n self.weight1_convT = tf.Variable(tf.truncated_normal(shape=[8,8,AGENT_HISTORY, 32], stddev=0.01), name=\"weight1_convT\")\n self.bias1_convT = tf.Variable(tf.constant(0.01, shape=[32]), name=\"bias1_convT\")\n self.conv1T = tf.nn.relu(tf.nn.conv2d(self.inputT, self.weight1_convT, strides=[1,4,4,1], padding=\"VALID\") + self.bias1_convT, name = 'conv1T')\n\n with tf.name_scope('conv2T'):\n self.weight2_convT = tf.Variable(tf.truncated_normal(shape=[4,4,32,64], stddev=0.01), name=\"weight2_convT\")\n self.bias2_convT = tf.Variable(tf.constant(0.01, shape=[64]), name=\"bias2_convT\")\n self.conv2T = tf.nn.relu(tf.nn.conv2d(self.conv1T, self.weight2_convT, strides=[1,2,2,1], padding=\"VALID\") + self.bias2_convT, name=\"conv2T\")\n\n with tf.name_scope('conv3T'):\n self.weight3_convT = tf.Variable(tf.truncated_normal(shape=[3,3,64,64], stddev=0.01), name=\"weight3_convT\")\n self.bias3_convT = tf.Variable(tf.constant(0.01, shape=[64]), name=\"bias3_convT\")\n self.conv3T = tf.nn.relu(tf.nn.conv2d(self.conv2T, self.weight3_convT, strides=[1,1,1,1], padding=\"VALID\") + self.bias3_convT, name=\"conv3T\")\n \n self.conv3_flatT = tf.reshape(self.conv3T, [-1, 7*7*64], name='flatten_conv3T')\n\n with tf.name_scope('fc4T'):\n self.weight4_fcT = tf.Variable(tf.truncated_normal(shape=[3136, 512], stddev=0.01), name=\"weight4_fcT\")\n self.bias4_fcT = tf.Variable(tf.constant(0.01, shape=[512]), name=\"bias4_fcT\")\n self.fc4T = tf.nn.relu(tf.matmul(self.conv3_flatT, self.weight4_fcT) + self.bias4_fcT, name=\"fc1T\")\n\n with tf.name_scope('fc5T'):\n self.weight5_fcT = tf.Variable(tf.truncated_normal(shape=[512, ATARI_NUM], stddev=0.01), name=\"weight5_fcT\")\n self.bias5_fcT = tf.Variable(tf.constant(0.01, shape=[ATARI_NUM]), name=\"bias5_fcT\")\n with tf.name_scope('outputT'):\n self.QValueT = (tf.matmul(self.fc4T, self.weight5_fcT) + self.bias5_fcT)\n\n with tf.name_scope('network'):\n tf.summary.histogram('weights1', self.weight1_conv, collections=['network'])\n tf.summary.histogram('bias1', self.bias1_conv, collections=['network'])\n tf.summary.histogram('weight2', self.weight2_conv, collections=['network'])\n tf.summary.histogram('bias2', self.bias2_conv, 
collections=['network'])\n tf.summary.histogram('weight3_conv', self.weight3_conv, collections=['network'])\n tf.summary.histogram('bias3_conv', self.bias3_conv, collections=['network'])\n tf.summary.histogram('weight4_fc', self.weight4_fc, collections=['network'])\n tf.summary.histogram('bias4_fc', self.bias4_fc, collections=['network'])\n tf.summary.histogram('weight5_fc', self.weight5_fc, collections=['network'])\n tf.summary.histogram('bias5_fc', self.bias5_fc, collections=['network'])\n self.cost_placeholder = tf.placeholder_with_default(tf.constant(self.rewards), shape=None, name='rewards')\n self.sum_op = tf.summary.scalar('rewards', self.cost_placeholder, collections=['rewards'])\n\n self.copy_tensors = [\n tf.assign(self.weight1_convT, self.weight1_conv), \n tf.assign(self.bias1_convT, self.bias1_conv), \n tf.assign(self.weight2_convT, self.weight2_conv),\n tf.assign(self.bias2_convT, self.bias2_conv),\n tf.assign(self.weight3_convT, self.weight3_conv),\n tf.assign(self.bias3_convT, self.bias3_conv),\n tf.assign(self.weight4_fcT, self.weight4_fc),\n tf.assign(self.bias4_fcT, self.bias4_fc),\n tf.assign(self.weight5_fcT, self.weight5_fc),\n tf.assign(self.bias5_fcT, self.bias5_fc)\n ]\n self.merge_summary0 = tf.summary.merge_all('network')\n self.create_placeholder()\n self.saver = tf.train.Saver()\n self.sess = tf.InteractiveSession()\n self.train_writer = tf.summary.FileWriter(self.logsdir, self.sess.graph)\n self.sess.run(tf.global_variables_initializer())\n\n def copy_to_target(self):\n self.sess.run(self.copy_tensors)\n\n def create_placeholder(self):\n with tf.name_scope('cost'):\n self.action_input = tf.placeholder(\"float\", [None, ATARI_NUM])\n self.yInput = tf.placeholder(\"float\", [None])\n Q_Action = tf.reduce_sum(tf.multiply(self.QValue, self.action_input), reduction_indices = 1)\n self.cost = tf.reduce_mean(tf.square(self.yInput - Q_Action), name=\"cost\")\n #self.action_placeholder = tf.placeholder_with_default(tf.argmax(self.action_input, axis = 1), shape=[None,], name='action_space')\n # self.merge_summary = tf.summary.merge([\n # tf.summary.histogram('action_space', self.action_placeholder, collections=['action']),\n # tf.summary.scalar('costs', self.cost_placeholder, collections=['train'])\n # ])\n\n with tf.name_scope('train'):\n self.trainStep = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(self.cost)\n\n def train_network(self):\n minibatch = random.sample(self.memory,BATCH_SIZE)\n state_batch = [data[0] for data in minibatch]\n action_batch= [data[1] for data in minibatch]\n reward_batch = [data[2] for data in minibatch]\n nextState_batch = [data[3] for data in minibatch]\n\n # Step 2: calculate y \n y_batch = []\n QValue_batch = self.QValueT.eval(feed_dict={self.inputT:nextState_batch})\n for i in range(0,BATCH_SIZE):\n terminal = minibatch[i][4]\n if terminal:\n y_batch.append(reward_batch[i])\n else:\n y_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))\n\n start = time.time()\n # _, summary, summary2 = self.sess.run([self.trainStep, self.merge_summary, self.merge_summary0], feed_dict={\n # self.yInput : y_batch,\n # self.action_input : action_batch,\n # self.input : state_batch\n # })\n self.sess.run([self.trainStep], feed_dict={\n self.yInput : y_batch,\n self.action_input : action_batch,\n self.input : state_batch\n })\n end = time.time()\n #print(end - start, 'network stuff') # summary ops are double what i have what else could be slowing it down\n\n # self.train_writer.add_summary(summary, self.timeStep)\n # 
self.train_writer.add_summary(summary2, self.timeStep)\n\n # save the network every 100000 iterations\n if self.timeStep % 100000 == 0:\n self.saver.save(self.sess, self.modeldir, global_step = self.timeStep)\n\n # copy the online weights to the target network every TARGET_UPDATE_FREQ steps\n if self.timeStep % TARGET_UPDATE_FREQ == 0:\n print('Copying to target')\n self.copy_to_target()\n\n def set_perception(self,obsv, action, reward, terminal):\n newState = np.append(obsv,self.currentState[:,:,1:],axis = 2)\n one_hot_action = np.zeros(ATARI_NUM)\n one_hot_action[action] = 1\n self.rewards += reward\n self.memory.append((self.currentState,one_hot_action,reward,newState,terminal))\n\n if len(self.memory) > REPLAY_MEMORY:\n self.memory.popleft()\n if self.timeStep > OBSERVE:\n # training step\n self.train_network()\n\n if self.timeStep % 100 == 0:\n state = \"\"\n if self.timeStep <= OBSERVE:\n state = \"observe\"\n elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE_FRAMES:\n state = \"explore\"\n else:\n state = \"train\"\n print (\"TIMESTEP\", self.timeStep, \"/ STATE\", state, \\\n \"/ EPSILON\", self.epsilon)\n\n self.currentState = newState\n self.timeStep += 1 \n\n def get_action(self):\n QValue = self.sess.run(self.QValue, feed_dict= {self.input: [self.currentState]})\n action = self.prevAction\n\n # time to pick a new action (each action is repeated for ACTION_REPEAT frames)\n if self.timeStep % ACTION_REPEAT == 0:\n if random.random() <= self.epsilon:\n action = random.randrange(0, self.actions)\n else:\n action = np.argmax(QValue)\n # anneal epsilon\n if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:\n self.epsilon -= (INITIAL_EPSILON- FINAL_EPSILON)/EXPLORE_FRAMES\n\n if action >= self.actions or action < 0:\n action = 0\n self.prevAction = action\n return action\n\n def initState(self, observation):\n self.currentState = np.stack([observation] * 4, axis = 2)\n self.currentState = np.reshape(self.currentState, [84, 84 , AGENT_HISTORY])\n\n def load(self):\n latest_checkpoint = tf.train.latest_checkpoint(self.modeldir)\n if latest_checkpoint:\n print(\"Loading model checkpoint {}...\\n\".format(latest_checkpoint))\n self.saver.restore(self.sess, latest_checkpoint)\n def done_writer(self, num):\n summary2 = self.sess.run(self.merge_summary0)\n self.train_writer.add_summary(self.sess.run(self.sum_op, feed_dict={self.cost_placeholder : self.rewards}), num)\n #self.train_writer.add_summary(summary, self.timeStep)\n self.train_writer.add_summary(summary2, self.timeStep)\n self.rewards = 0\n","repo_name":"Airconaaron/tf-rl-basics","sub_path":"tf-dqn/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":13951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41028432185","text":"import functools\nimport inspect\nimport os\nimport warnings\nfrom collections.abc import Iterable\n\nimport numpy as np\nimport numpy.lib.recfunctions as rf\nimport scipy.sparse as sparse\nfrom docrep import DocstringProcessor\n\n__all__ = [\n 'Docorator',\n 'PrintableList',\n 'PrintableDict',\n 'HealthDict',\n 'NestedDict',\n 'flat_list',\n 'sanitize_dict',\n 'methods_to_table',\n 'models_to_table',\n 'ignore_warnings',\n 'is_symmetric',\n 'is_valid_propname',\n 'is_transient',\n 'get_mixture_model_args',\n 'dict_to_struct',\n 'struct_to_dict',\n 'get_printable_props',\n 'get_printable_labels',\n]\n\n\nclass Docorator(DocstringProcessor):\n \"\"\"OpenPNM's customized docstring processor.\"\"\"\n\n __instance__ = None\n\n def __new__(cls, *args, **kwargs):\n if Docorator.__instance__ is None:\n Docorator.__instance__ 
= DocstringProcessor()\n\n # Add custom parameter type sections\n a = DocstringProcessor.param_like_sections\n Docorator.__instance__.param_like_sections = a + [] # [\"Attributes\", \"Settings\"]\n # Add custom text type sections\n a = Docorator.__instance__.text_sections\n Docorator.__instance__.text_sections = a + []\n\n # Create a single list of all section types\n a = Docorator.__instance__.param_like_sections\n b = Docorator.__instance__.text_sections\n Docorator.__instance__.all_sections = a + b\n\n return Docorator.__instance__\n\n\nclass PrintableList(list):\n r\"\"\"\n Simple subclass of ``list`` that has nice printing. Only works flat lists.\n\n Examples\n --------\n >>> from openpnm.utils import PrintableList\n >>> temp = ['item1', 'item2', 'item3']\n >>> print(PrintableList(temp))\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n 1 : item1\n 2 : item2\n 3 : item3\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n\n Each line contains the result of ``print(item)`` on each item in the list\n\n \"\"\"\n\n def __str__(self):\n horizontal_rule = \"―\" * 78\n lines = [horizontal_rule]\n self.sort()\n for i, item in enumerate(self):\n lines.append(\"{0:<5s} : {1}\".format(str(i + 1), item))\n lines.append(horizontal_rule)\n return \"\\n\".join(lines)\n\n # def __repr__(self): # pragma: no cover\n # return self.__str__()\n\n\nclass PrintableDict(dict):\n r\"\"\"\n Simple subclass of ``dict`` that has nicer printing.\n\n Examples\n --------\n >>> from openpnm.utils import PrintableDict\n >>> from numpy import array as arr\n >>> d = {'item1': 1, 'item2': '1', 'item3': [1, 1], 'item4': arr([1, 1])}\n >>> print(PrintableDict(d))\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n Key Value\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n item1 1\n item2 1\n item3 [1, 1]\n item4 (2,)\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n\n If the item is a Numpy array the value column will contain the items'\n shape, otherwise it will contain the result of ``print(item)``\n\n \"\"\"\n\n def __init__(self, *args, key=\"Key\", value=\"Value\", **kwargs):\n self._value = value\n self._key = key\n super().__init__(*args, **kwargs)\n\n # def __repr__(self): # pragma: no cover\n # return self.__str__()\n\n def __str__(self):\n header = \"―\" * 78\n lines = [header, \"{0:<35s} {1}\".format(self._key, self._value), header]\n for item in list(self.keys()):\n if item.startswith('_'):\n continue\n if isinstance(self[item], np.ndarray):\n lines.append(\"{0:<35s} {1}\".format(item, np.shape(self[item])))\n else:\n lines.append(\"{0:<35s} {1}\".format(item, self[item]))\n lines.append(header)\n return \"\\n\".join(lines)\n\n\nclass NestedDict(dict):\n \"\"\"Brief explanation of 'NestedDict'\"\"\"\n\n def __init__(self, mapping={}, delimiter=\"/\"):\n super().__init__()\n self.delimiter = delimiter\n self.update(mapping)\n self.unravel()\n\n def __setitem__(self, key, value):\n path = key.split(self.delimiter, 1)\n if len(path) > 1:\n if path[0] not in self.keys():\n self[path[0]] = NestedDict(delimiter=self.delimiter)\n self[path[0]][path[1]] = value\n else:\n super().__setitem__(key, value)\n\n def __missing__(self, key):\n self[key] = NestedDict(delimiter=self.delimiter)\n return self[key]\n\n def unravel(self):\n for item in self.keys():\n self[item] = self.pop(item)\n\n def to_dict(self, dct=None):\n if dct is None:\n dct = self\n plain_dict = 
dict()\n for key in dct.keys():\n value = dct[key]\n if hasattr(value, \"keys\"):\n plain_dict[key] = self.to_dict(value)\n else:\n plain_dict[key] = value\n return plain_dict\n\n def keys(self, dicts=True, values=True):\n k = list(super().keys())\n new_keys = []\n for item in k:\n if hasattr(self[item], \"keys\"):\n if dicts:\n new_keys.append(item)\n else:\n if values:\n new_keys.append(item)\n return new_keys\n\n def __str__(self):\n def print_level(self, p=\"\", indent=\"-\"):\n for item in self.keys():\n if hasattr(self[item], \"keys\"):\n p = print_level(self[item], p=p, indent=indent + indent[0])\n elif indent[-1] != \" \":\n indent = indent + \"\"\n p = indent + item + \"\\n\" + p\n return p\n\n p = print_level(self)\n return p\n\n\nclass HealthDict(PrintableDict):\n r\"\"\"\n This class adds a 'health' check to a standard dictionary.\n\n This check looks into the dict values, and considers empty lists as\n healthy and all else as unhealthy. If one or more entries is\n 'unhealthy' the health method returns False.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def _get_health(self):\n health = True\n for item in list(self.keys()):\n try:\n if len(self[item]) > 0:\n health = False\n except TypeError:\n if self[item]:\n health = False\n return health\n\n health = property(fget=_get_health)\n\n def __bool__(self):\n return self.health\n\n\ndef flat_list(input_list):\n r\"\"\"\n Given a list of nested lists of arbitrary depth, returns a single\n level or 'flat' list.\n \"\"\"\n def _flatten(l):\n for el in l:\n if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):\n yield from _flatten(el)\n else:\n yield el\n\n return list(_flatten(input_list))\n\n\ndef sanitize_dict(input_dict):\n r\"\"\"\n Given a nested dictionary, ensures that all nested dicts are normal\n Python dicts. This is necessary for pickling, or just converting\n an 'auto-vivifying' dict to something that acts normal.\n \"\"\"\n plain_dict = dict()\n for key in input_dict.keys():\n value = input_dict[key]\n if hasattr(value, \"keys\"):\n plain_dict[key] = sanitize_dict(value)\n else:\n plain_dict[key] = value\n return plain_dict\n\n\ndef methods_to_table(obj):\n r\"\"\"\n Converts a methods on an object to a ReST compatible table\n\n Parameters\n ----------\n obj : Base\n Any object that has a methods\n params : bool\n Indicates whether or not to include a list of parameter\n values in the table. 
Set to False for just a list of models, and\n True for a more verbose table with all parameter values.\n\n \"\"\"\n parent = obj.__class__.__mro__[1]\n temp = inspect.getmembers(parent, predicate=inspect.isroutine)\n parent_funcs = [i[0] for i in temp if not i[0].startswith(\"_\")]\n\n temp = inspect.getmembers(obj.__class__, predicate=inspect.isroutine)\n obj_funcs = [i[0] for i in temp if not i[0].startswith(\"_\")]\n funcs = set(obj_funcs).difference(set(parent_funcs))\n\n row = \"+\" + \"-\" * 22 + \"+\" + \"-\" * 49 + \"+\"\n fmt = \"{0:1s} {1:20s} {2:1s} {3:47s} {4:1s}\"\n lines = []\n lines.append(row)\n lines.append(fmt.format(\"|\", \"Method\", \"|\", \"Description\", \"|\"))\n lines.append(row.replace(\"-\", \"=\"))\n for i, item in enumerate(funcs):\n try:\n s = getattr(obj, item).__doc__.strip()\n end = s.find(\"\\n\")\n if end > 47:\n s = s[:44] + \"...\"\n lines.append(fmt.format(\"|\", item, \"|\", s[:end], \"|\"))\n lines.append(row)\n except AttributeError:\n pass\n return \"\\n\".join(lines)\n\n\ndef models_to_table(obj, params=True):\n r\"\"\"\n Converts a all the models on an object to a ReST compatible table\n\n Parameters\n ----------\n obj : Base\n Any object that has a ``models`` attribute\n params : bool\n Indicates whether or not to include a list of parameter\n values in the table. Set to False for just a list of models, and\n True for a more verbose table with all parameter values.\n\n \"\"\"\n if not hasattr(obj, \"models\"):\n raise Exception(\"Received object does not have any models\")\n row = \"+\" + \"-\" * 4 + \"+\" + \"-\" * 22 + \"+\" + \"-\" * 18 + \"+\" + \"-\" * 26 + \"+\"\n fmt = \"{0:1s} {1:2s} {2:1s} {3:20s} {4:1s} {5:16s} {6:1s} {7:24s} {8:1s}\"\n lines = []\n lines.append(row)\n lines.append(\n fmt.format(\"|\", \"#\", \"|\", \"Property Name\", \"|\", \"Parameter\", \"|\", \"Value\", \"|\")\n )\n lines.append(row.replace(\"-\", \"=\"))\n for i, item in enumerate(obj.models.keys()):\n prop = item\n if len(prop) > 20:\n prop = item[:17] + \"...\"\n temp = obj.models[item].copy()\n model = str(temp.pop(\"model\")).split(\" \")[1]\n lines.append(\n fmt.format(\"|\", str(i + 1), \"|\", prop, \"|\", \"model:\", \"|\", model, \"|\")\n )\n lines.append(row)\n if params:\n for param in temp.keys():\n p1 = param\n if len(p1) > 16:\n p1 = p1[:13] + \"...\"\n p2 = str(temp[param])\n if len(p2) > 24:\n p2 = p2[:21] + \"...\"\n lines.append(fmt.format(\"|\", \"\", \"|\", \"\", \"|\", p1, \"|\", p2, \"|\"))\n lines.append(row)\n return \"\\n\".join(lines)\n\n\ndef ignore_warnings(warning=RuntimeWarning):\n r\"\"\"\n Decorator for catching warnings. Useful in pore-scale models where\n nans are inevitable, and numpy gets annoying by throwing lots of\n RuntimeWarnings.\n\n Parameters\n ----------\n warning : Warning\n Python warning type that you want to temporarily ignore\n\n Examples\n --------\n >>> from openpnm.utils import ignore_warnings\n >>> @ignore_warnings()\n ... def myfun(x):\n ... return 1/x\n\n >>> import numpy as np\n >>> x = np.arange(5)\n >>> myfun(x)\n array([ inf, 1. 
, 0.5 , 0.33333333, 0.25 ])\n\n \"\"\"\n\n def _ignore_warning(function):\n @functools.wraps(function)\n def __ignore_warning(*args, **kwargs):\n with warnings.catch_warnings(record=True):\n # Catch all warnings of this type\n warnings.simplefilter(\"always\", warning)\n # Execute the function\n result = function(*args, **kwargs)\n return result\n\n return __ignore_warning\n\n return _ignore_warning\n\n\ndef is_symmetric(a, rtol=1e-10):\n r\"\"\"\n Is ``a`` a symmetric matrix?\n\n Parameters\n ----------\n a : ndarray or sparse matrix\n Object to check for being a symmetric matrix.\n rtol : float\n Relative tolerance with respect to the smallest entry in ``a``\n that is used to determine if ``a`` is symmetric.\n\n Returns\n -------\n bool\n ``True`` if ``a`` is a symmetric matrix, ``False`` otherwise.\n\n \"\"\"\n if not isinstance(a, np.ndarray) and not sparse.issparse(a):\n raise Exception(\"'a' must be either a sparse matrix or an ndarray.\")\n if a.shape[0] != a.shape[1]:\n raise Exception(\"'a' must be a square matrix.\")\n\n atol = np.amin(np.absolute(a.data)) * rtol\n if sparse.issparse(a):\n issym = False if ((a - a.T) > atol).nnz else True\n elif isinstance(a, np.ndarray):\n issym = False if np.any((a - a.T) > atol) else True\n\n return issym\n\n\ndef get_mixture_model_args(\n phase,\n composition='xs',\n args={\n 'mus': 'pore.viscosity',\n 'MWs': 'param.molecular_weight',\n }\n):\n r\"\"\"\n This is used in tests to run models generically\n \"\"\"\n from openpnm.models.phase.misc import mole_to_mass_fraction\n vals = {}\n if composition in ['ws']:\n temp = np.vstack(list(mole_to_mass_fraction(phase=phase).values()))[:, 0]\n vals[composition] = temp\n else:\n temp = np.vstack(list(phase['pore.mole_fraction'].values()))[:, 0]\n vals[composition] = temp\n for item in args.keys():\n temp = np.vstack(list(phase.get_comp_vals(args[item]).values()))[:, 0]\n vals[item] = temp\n return vals\n\n\ndef dict_to_struct(d):\n r\"\"\"\n Converts a dictionary of numpy arrays to a numpy struct\n\n Parameters\n ----------\n d : dict\n A dictionary wtih numpy arrays in each key. The arrays must be all\n the same size.\n\n Returns\n -------\n s : numpy struct\n A numpy struct with the fields or names take from the dictionary keys\n \"\"\"\n struct = rf.unstructured_to_structured(np.vstack(list(d.values())).T,\n names=list(d.keys()))\n return struct\n\n\ndef struct_to_dict(s):\n r\"\"\"\n Converts a numpy struct array into a dictionary using the struct labels as\n keys\n\n Parameters\n ----------\n s : numpy struct\n The struct array\n\n Returns\n -------\n d : dict\n A dictionary with the struct labels or fields as the keys\n \"\"\"\n d = {}\n for key in s.dtype.names:\n d[key] = s[key]\n return d\n\n\ndef get_printable_props(item, suffix='', hr=78*'―'):\n r\"\"\"\n This function is used by the __str__ methods on all classes to get a\n nicely formatted list of properties on the object.\n\n Parameters\n ----------\n item : dict\n The OpenPNM dictionary object with each dictionary key containing a\n numpy array\n suffix : str, optional\n If provided, this will be attached to the end of every dictionary\n key so that 'pore.viscosity' becomes 'pore.viscosity.phase_01'. 
This\n is a workaround to enhance the printing of component information on\n mixtures.\n hr : str, optional\n The horizontal rule to use between the table heading and body\n\n Returns\n -------\n table : str\n A formatted string that will output a 78 character wide table when\n printed\n\n Notes\n -----\n The table returned by this function only contains items that are numerical\n arrays. Any boolean arrays are ignored.\n\n See Also\n --------\n get_printable_labels\n\n \"\"\"\n if suffix and not suffix.startswith('.'):\n suffix = '.' + suffix\n header = [' ']*78\n header[2] = '#'\n header[5:15] = 'Properties'\n header[-12:] = 'Valid Values'\n lines = ''.join(header) + '\\n' + hr\n i = 0\n for k, v in item.items():\n if (v.dtype != bool) and not ('._' in k):\n i += 1\n s = [' ']*78\n s[:3] = str(i+1).rjust(3)\n prop = k + suffix\n s[5:5+len(prop)] = prop\n element = k.split('.', 1)[0]\n nans = np.any(np.isnan(np.atleast_2d(v.T)), axis=0)\n valid = str(np.sum(~nans)) + ' / ' + str(item._count(element))\n s[-20:] = valid.rjust(20)\n a = ''.join(s)\n lines = '\\n'.join((lines, a))\n return lines\n\n\ndef get_printable_labels(item, suffix='', hr=78*'―'):\n r\"\"\"\n This function is used by the __str__ methods on all classes to get a\n nicely formatted list of labels on the object.\n\n Parameters\n ----------\n item : dict\n The OpenPNM dictionary object with each dictionary key containing a\n numpy array\n suffix : str, optional\n If provided, this will be attached to the end of every dictionary\n key so that 'pore.viscosity' becomes 'pore.viscosity.phase_01'. This\n is a workaround to enhance the printing of component information on\n mixtures.\n hr : str, optional\n The horizontal rule to use between the table heading and body\n\n Returns\n -------\n table : str\n A formatted string that will output a 78 character wide table when\n printed\n\n Notes\n -----\n The table returned by this function only contains items that boolean\n arrays. Any numerical arrays are ignored.\n\n See Also\n --------\n get_printable_props\n \"\"\"\n if suffix and not suffix.startswith('.'):\n suffix = '.' + suffix\n header = [' ']*78\n header[2] = '#'\n header[5:11] = 'Labels'\n header[-18:] = 'Assigned Locations'\n lines = ''.join(header) + '\\n' + hr\n i = 0\n for k, v in item.items():\n if (v.dtype == bool) and not ('._' in k):\n i += 1\n s = [' ']*78\n s[:3] = str(i+1).rjust(3)\n prop = k + suffix\n s[5:5+len(prop)] = prop\n valid = str(np.sum(v))\n s[-12:] = valid.rjust(12)\n a = ''.join(s)\n lines = '\\n'.join((lines, a))\n return lines\n\n\ndef is_transient(algorithms):\n # check that algorithms is a list\n if type(algorithms) is not list:\n algorithms = [algorithms]\n # return True if any algorithm is transient\n for alg in algorithms:\n if hasattr(alg, 'soln'):\n soln_type = type(alg.soln[alg.settings['quantity']])\n if 'TransientSolution' in str(soln_type):\n return True\n return False\n\n\ndef is_valid_propname(propname):\n r\"\"\"\n Checks if ``propname`` is a valid OpenPNM propname, i.e. starts with\n 'pore.' 
or 'throat.'\n\n Parameters\n ----------\n propname : str\n Property name to check whether it's a valid OpenPNM propname.\n\n Returns\n -------\n bool\n Whether or not ``propname`` is a valid name\n\n \"\"\"\n if not isinstance(propname, str):\n return False\n temp = propname.split(\".\")\n if temp[0] not in [\"pore\", \"throat\"]:\n return False\n if len(temp) == 1:\n return False\n for field in temp:\n if len(field) == 0:\n return False\n return True\n\n\ndef nbr_to_str(nbr, t_precision=12):\n r\"\"\"\n Converts a scalar into a string in scientific (exponential) notation\n without the decimal point.\n\n Parameters\n ----------\n nbr : scalar\n The number to be converted into a scalar.\n t_precision : integer\n The time precision (number of decimal places). Default value is 12.\n\n Returns\n -------\n num : str\n The string represenation of the given number in scientific notation\n \"\"\"\n from decimal import Decimal as dc\n n = int(-dc(str(round(nbr, t_precision))).as_tuple().exponent\n * (round(nbr, t_precision) != int(nbr)))\n nbr_str = (str(int(round(nbr, t_precision)*10**n)) + ('e-'+str(n))*(n != 0))\n return nbr_str\n","repo_name":"PMEAL/OpenPNM","sub_path":"openpnm/utils/_misc.py","file_name":"_misc.py","file_ext":"py","file_size_in_byte":20412,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"3"} +{"seq_id":"14431345013","text":"import unittest\nfrom please.lang_config.dpr import DprFreePascalConfig, DprDelphiConfig\nfrom please.lang_config import utils\n\nclass DprLinuxConfiguratorTest(unittest.TestCase):\n def test_is_garbage(self):\n extensions_list = [\".BPG\", \".BPL\", \".CFG\", \".DCP\", \".DCU\", \".DDP\", \".DFM\", \".~DF\", \".DOF\", \".DPK\", \".~DP\", \".DSK\",\n \".DSM\", \".DCI\", \".DRO\", \".DMT\", \".DCT\", \".OBJ\", \".~PA\", \".RES\", \".RC\", \".TODO\", \".BAK\", \".bpg\", \".bpl\", \n \".cfg\", \".dcp\", \".dcu\", \".ddp\", \".dfm\", \".~df\", \".dof\", \".dpk\", \".~dp\", \".dsk\", \".dsm\", \".dci\", \n \".dro\", \".dmt\", \".dct\", \".obj\", \".~pa\", \".res\", \".rc\", \".todo\", \".bak\"]\n \n linux = DprFreePascalConfig('sample_file.dpr')\n win = DprDelphiConfig('sample_file.dpr')\n \n #simple extension test\n for extension in extensions_list :\n self.assertTrue(linux.is_compile_garbage(\"project\" + extension))\n self.assertFalse(linux.is_compile_garbage(\"\\windows\\documents\\sample_file\"))\n self.assertFalse(linux.is_compile_garbage(\"\\windows\\documents\\sample_file.dpr\"))\n #case tests\n self.assertFalse(win.is_compile_garbage(\"\\windows\\documents\\sample_file.Dpr\"))\n self.assertFalse(win.is_compile_garbage(\"\\windows\\documents\\sample_file.dPR\"))\n self.assertFalse(win.is_compile_garbage(\"\\windows\\documents\\sample_file.DPR\")) \n \n def test_get_compile_command(self) :\n #linux command test\n linux = DprFreePascalConfig('project.dpr')\n self.assertEqual(([\"fpc\", \"-Mdelphi\", \"project.dpr\"],), linux.compile_commands)\n #self.assertEqual([\"./project\"], linux.run_command)\n #windows command test\n win = DprDelphiConfig('project.dpr')\n self.assertEqual(([\"dcc32.exe\", \"-cc\", \"project.dpr\"],), win.compile_commands)\n #self.assertEqual([\"project.exe\"], win.run_command) # hard decisions in utils\n \nif __name__ == '__main__':\n 
unittest.main()\n\n","repo_name":"parallel-p/please","sub_path":"please/lang_config/dpr_test.py","file_name":"dpr_test.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
+{"seq_id":"29031400627","text":"import pytest\n\nfrom datetime import timedelta\n\nfrom wiki.grids.utils import dummy_request_for_grids, insert_rows, row_change_action\nfrom wiki.notifications.generators import GridEdition as EditionGen\nfrom wiki.notifications.models import PageEvent\nfrom wiki.notifications.queue import Queue\nfrom wiki.subscriptions.logic import create_subscription\nfrom wiki.utils import timezone\n\npytestmark = [pytest.mark.django_db]\n\n\ndef test_add_column(client, wiki_users, test_grid, test_org_ctx):\n    create_subscription(wiki_users.chapson, test_grid)\n    create_subscription(wiki_users.asm, test_grid)\n\n    inserted_indexes = insert_rows(\n        test_grid,\n        [\n            {'src': 'source1', 'dst': 'destination1'},\n            {'src': 'source2'},\n            {'src': 'source3', 'dst': 'destination2', 'staff': 'chapson'},\n        ],\n        dummy_request_for_grids(),\n    )\n    test_grid.save()\n    for idx in inserted_indexes:\n        # Creates a new event (or updates an existing one)\n        row_change_action[PageEvent.EVENT_TYPES.create](\n            test_grid.access_data[test_grid.access_idx[idx]],\n            wiki_users.thasonic,\n            test_grid,\n        )\n\n    # The event timeouts must be moved into the past so that they end up in the queue\n    PageEvent.objects.all().update(timeout=timezone.now() - timedelta(hours=1))\n\n    new_events = Queue().new_events(test_grid.id)\n    generator = EditionGen()\n    result = generator.generate(new_events, {})\n    assert len(result) == 2\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/wiki_tests/unit_pytest/notifications/test_grid_edition.py","file_name":"test_grid_edition.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"19894586904","text":"from bot import main_color, add_log\nimport nextcord as nc\nfrom nextcord.ext import commands\n\n\nclass Main(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.has_permissions(administrator=True)\n    @nc.slash_command(description=\"Allows banning a member\")\n    async def ban(self, interaction: nc.Interaction, member: nc.Member, reason='none'):\n        add_log(interaction.user.name, 'ban')\n        try:\n            if member != interaction.user:\n                await member.ban(reason=reason)\n                embed = nc.Embed(title=f'{member} has been banned!',\n                                 description=f'> **reason:** {reason}', color=main_color)\n                await interaction.channel.send(embed=embed)\n            else:\n                embed = nc.Embed(title='Error!', description='> You cannot ban yourself!', color=main_color)\n                await interaction.followup.send(embed=embed)\n        except nc.DiscordServerError:\n            embed = nc.Embed(title='Error!', description=f'> something went wrong', color=main_color)\n            await interaction.followup.send(embed=embed)\n\n    @commands.has_permissions(administrator=True)\n    @nc.slash_command(description=\"Allows unbanning a user\")\n    async def unban(self, interaction: nc.Interaction, id, reason='none'):\n        user = await self.bot.fetch_user(id)\n        try:\n            add_log(interaction.user.name, 'unban')\n            await interaction.guild.unban(user)\n            embed = nc.Embed(title=f'{user} has been unbanned!', description=f'> **reason:** {reason}',\n                             color=int(main_color))\n            await interaction.channel.send(embed=embed)\n        except nc.DiscordServerError:\n            embed = nc.Embed(title='Error!', description=f'> Failed to unban {user}', color=int(main_color))\n            await interaction.channel.send(embed=embed)\n\n\ndef setup(bot):\n    bot.add_cog(Main(bot))\n","repo_name":"cub-has-injected/polybot","sub_path":"cogs/moderation/bans.py","file_name":"bans.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"25769713684","text":"from EvCar import ThaiEvCar\n\ndef display_option():\n    print(\"Welcome to ThaiEvCar Data Store System (VDSS)\")\n    print(\"1.Add ev_car data\")\n    print(\"2.Delete ev_car data\")\n    print(\"3.Edit ev_car price\")\n    print(\"4.Show all ev_car data\")\n    print(\"5.Exit\")\n\n    select = int(input(\"select (1-5)?: \"))\n    if select == 1:\n        input_ThaiEvCar_data()\n    elif select == 2:\n        delete_ThaiEvCar()\n    elif select == 3:\n        edit_ThaiEvCar_price()\n    elif select == 4:\n        display_ThaiEvCar()\n    elif select == 5:\n        print(\"Good Bye.\")\n        exit(0)\n    else:\n        print(\"Please, select number 1-5\")\n\ndef input_ThaiEvCar_data():\n    car_id = input(\"Enter ThaiEvCar car_id: \")\n    model = input(\"Enter ThaiEvCar model: \")\n    brand = input(\"Enter ThaiEvCar brand: \")\n    acceleration = input(\"Enter ThaiEvCar acceleration: \")\n    rang = input(\"Enter ThaiEvCar rang: \")\n    price = float(input(\"Enter ThaiEvCar price: \"))\n\n    ThaiEvCar.my_ThaiEvCar.append(ThaiEvCar(car_id, model, brand, acceleration, rang, price))\n    print(\"\\n-------------------------------\")\n    print(\"Added ThaiEvCar to the store.\")\n    print(\"-------------------------------\\n\")\n\ndef display_ThaiEvCar():\n    if len(ThaiEvCar.my_ThaiEvCar) == 0:\n        print(\"You have no vehicle data.\")\n    else:\n        print(f'You have {len(ThaiEvCar.my_ThaiEvCar)} following.')\n        n = 1 # count\n        for x in ThaiEvCar.my_ThaiEvCar:\n            print(f'[{n}]:', end=\" \")\n            x.ThaiEvCar_detail()\n            n = n + 1\n        print(\"\\n\")\n\ndef delete_ThaiEvCar():\n    display_ThaiEvCar() # display all data in list\n    if len(ThaiEvCar.my_ThaiEvCar) != 0:\n        s = int(input(\"Select to delete?: \"))\n        ThaiEvCar.delete_ThaiEvCar(ThaiEvCar, s - 1)\n        print(\"\\n-------------------------------\")\n        print(\"Your data has been deleted.\")\n        print(\"-------------------------------\\n\")\n\ndef edit_ThaiEvCar_price():\n    display_ThaiEvCar()\n    if len(ThaiEvCar.my_ThaiEvCar) != 0:\n        s = int(input(\"Select to edit?: \"))\n\n        print(f'Previous price: {ThaiEvCar.my_ThaiEvCar[s-1].price}')\n        new_price = float(input(\"New price: \"))\n        ThaiEvCar.edit_ThaiEvCar_price(ThaiEvCar, s-1, new_price)\n\ns = 0\nwhile s == 0:\n    display_option()","repo_name":"wirayamind45/OOP_364411760008","sub_path":"oop_mid_exam_2566/EvCarApp.py","file_name":"EvCarApp.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"9960034998","text":"import datetime\r\nimport json\r\nfrom typing import Optional\r\nimport decimal\r\n\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.db.utils import IntegrityError\r\nfrom django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse\r\nfrom django.shortcuts import render\r\nfrom django.http.request import HttpRequest\r\nfrom django.urls.base import reverse\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.db.models.query import QuerySet\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\nfrom .models import Employee, Leave, User\r\n\r\nMANAGER = 2\r\nEMPLOYEE = 1\r\nPENDING_STATUS = "pending"\r\nAPPROVED_STATUS = 
\"approved\"\r\nREJECTED_STATUS = \"rejected\"\r\n\r\n# Create your views here.\r\n@login_required(login_url=\"login\")\r\ndef index(request: HttpRequest):\r\n \"\"\"Shows the dashboard like view\"\"\"\r\n current_employee: Employee = Employee.objects.get(employee=request.user)\r\n if current_employee.hierarchy == MANAGER:\r\n context = manager_index_context(current_employee)\r\n else: \r\n # context for employees\r\n context = employee_index_context(current_employee)\r\n\r\n return render(request=request, template_name=\"leave/index.html\", context=context)\r\n\r\ndef employee_index_context(current_employee: Employee):\r\n \"\"\"Returns the context for displaying employee main page\"\"\"\r\n return {\r\n \"is_manager\": current_employee.hierarchy == MANAGER,\r\n \"leave_count\": current_employee.leave_count,\r\n \"superior_list\": current_employee.superior.all()\r\n }\r\n\r\ndef manager_index_context(current_employee: Employee):\r\n \"\"\"Returns the context for displaying manager main page\"\"\"\r\n employee_list: QuerySet = current_employee.employee.subordinates.all()\r\n\r\n all_employees = [employee.name() for employee in Employee.objects.filter(hierarchy__exact=EMPLOYEE) if employee not in employee_list]\r\n approved = [leave.serialize() for employee in employee_list for leave in employee.leaves.all().filter(status__exact=APPROVED_STATUS)]\r\n pending = [leave.serialize() for employee in employee_list for leave in employee.leaves.all().filter(status__exact=PENDING_STATUS)]\r\n \r\n return {\r\n \"is_manager\": current_employee.hierarchy == MANAGER,\r\n \"all_employees\": all_employees,\r\n \"employee_list\": employee_list,\r\n \"approved_leaves\": approved,\r\n \"pending_leaves\": pending\r\n }\r\n\r\n@login_required(login_url=\"login\")\r\ndef apply_leave(request: HttpRequest):\r\n \"\"\"\r\n Processes information from apply leave form \r\n startDate, startTime, endDate, endTime, superior\r\n \"\"\"\r\n employee: Employee = Employee.objects.get(employee=request.user)\r\n\r\n startDate = [int(i) for i in request.POST['startDate'].split(sep=\"-\")]\r\n startTime = 0 if request.POST['startTime'] == \"AM\" else 12\r\n endDate = [int(i) for i in request.POST['endDate'].split(sep=\"-\")]\r\n endTime = 0 if request.POST['endTime'] == \"AM\" else 12\r\n\r\n # create datetime object (received a naive datetime)\r\n start = datetime.datetime(*startDate, startTime)\r\n end = datetime.datetime(*endDate, endTime) + datetime.timedelta(hours=12)\r\n\r\n # create superior object \r\n try:\r\n superior = Employee.objects.get(employee=User.objects.get(username=request.POST['superior']))\r\n except User.DoesNotExist:\r\n superior = None \r\n print(start, end, superior)\r\n\r\n # if existing leave, dont create new leave\r\n try:\r\n existing_leave: Leave = Leave.objects.get(employee=employee, start=start, end=end) \r\n except Leave.DoesNotExist:\r\n # create leave entry \r\n new_leave = Leave(employee=employee, start=start, end=end)\r\n new_leave.save()\r\n\r\n return HttpResponseRedirect(reverse(viewname=\"index\"))\r\n\r\n@csrf_exempt\r\n@login_required(login_url=\"login\")\r\ndef approve_reject_leave(request: HttpRequest):\r\n \"\"\"Updates database when manager approves or rejects leave\"\"\"\r\n if request.method == \"PUT\":\r\n data: dict = json.loads(request.body)\r\n print(data)\r\n leave: Leave = Leave.objects.get(pk=data['leave_id'])\r\n\r\n if data['action'] == \"approve\":\r\n status = APPROVED_STATUS \r\n # updates database\r\n leave.status = status\r\n leave.save()\r\n\r\n # update leave 
count of employee if approved\r\n leave.employee.leave_count -= decimal.Decimal(leave.num_days_taken())\r\n print(leave.employee.leave_count)\r\n leave.employee.save()\r\n elif data['action'] == \"reject\":\r\n status = REJECTED_STATUS \r\n # updates database\r\n leave.status = status\r\n leave.save()\r\n\r\n return JsonResponse({\"success\": True}) \r\n else:\r\n return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\r\n\r\ndef approved_leaves_list(request: HttpRequest):\r\n \"\"\"Return JSON of approved leaves\"\"\"\r\n if request.method == \"GET\":\r\n current_employee: Employee = Employee.objects.get(employee=request.user)\r\n employee_list: QuerySet = current_employee.employee.subordinates.all()\r\n approved = [leave.serialize() for employee in employee_list for leave in employee.leaves.all().filter(status__exact=APPROVED_STATUS)]\r\n print(approved)\r\n return JsonResponse(approved, safe=False) \r\n else:\r\n return JsonResponse({\"error\": \"GET request required.\"}, status=400) \r\n\r\n@csrf_exempt\r\n@login_required(login_url=\"login\")\r\ndef add_employee_to_superior(request: HttpRequest):\r\n \"\"\"Allows superior to add employee\"\"\"\r\n if request.method == \"PUT\":\r\n data: dict = json.loads(request.body)\r\n print(request.user, data)\r\n\r\n # add employee to superior \r\n try:\r\n new_employee: Employee = Employee.objects.get(employee=User.objects.get(username=data['employee']))\r\n except User.DoesNotExist:\r\n return JsonResponse({\"success\": False})\r\n new_employee.superior.add(request.user)\r\n new_employee.save()\r\n \r\n return JsonResponse({\"success\": True}) \r\n else:\r\n return JsonResponse({\"error\": \"PUT request required.\"}, status=400)\r\n\r\ndef login_view(request: HttpRequest):\r\n \"\"\"Displays login page\"\"\"\r\n if request.method == \"POST\":\r\n # Attempt to sign user in\r\n username = request.POST[\"username\"]\r\n password = request.POST[\"password\"]\r\n user: Optional[User] = authenticate(request, username=username, password=password)\r\n\r\n # Check if authentication successful\r\n if user is not None:\r\n login(request, user)\r\n return HttpResponseRedirect(reverse(\"index\"))\r\n else:\r\n return render(request, \"leave/login.html\", {\r\n \"message\": \"Invalid email and/or password.\"\r\n })\r\n else:\r\n return render(request, \"leave/login.html\")\r\n\r\ndef logout_view(request: HttpRequest):\r\n \"\"\"View for users to logout\"\"\"\r\n logout(request)\r\n return HttpResponseRedirect(reverse(\"index\")) \r\n\r\ndef register(request: HttpRequest):\r\n \"\"\"Register for a new account\"\"\"\r\n if request.method == \"POST\":\r\n email = request.POST[\"email\"]\r\n username = request.POST[\"username\"]\r\n\r\n # Ensure password matches confirmation\r\n password = request.POST[\"password\"]\r\n confirmation = request.POST[\"confirmation\"]\r\n is_manager = request.POST.get('is_manager', False)\r\n \r\n if password != confirmation:\r\n return render(request=request, template_name=\"leave/register.html\", context={\r\n \"message\": \"Passwords must match.\"\r\n })\r\n\r\n # Attempt to create new user\r\n try:\r\n user: User = User.objects.create_user(username, email, password)\r\n Employee(employee=user, hierarchy=2 if is_manager else 1).save()\r\n user.save()\r\n except IntegrityError as e:\r\n print(e)\r\n return render(request, \"leave/register.html\", {\r\n \"message\": \"Email address already taken.\"\r\n })\r\n login(request, user)\r\n return HttpResponseRedirect(reverse(\"index\"))\r\n else:\r\n return 
render(request, \"leave/register.html\")\r\n\r\n","repo_name":"loheesong/leave","sub_path":"leave/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41299697002","text":"#######################################################################\n#\n# Converter for Enigma2\n# Coded by shamann (c)2013\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n#######################################################################\n\nfrom Renderer import Renderer\nfrom enigma import ePixmap\nfrom Tools.Directories import fileExists, SCOPE_SKIN_IMAGE, SCOPE_CURRENT_SKIN, resolveFilename\nfrom enigma import eServiceReference\nfrom Components.config import config\n\nclass g16RefTV(Renderer):\n\n\tdef __init__(self):\n\t\tRenderer.__init__(self)\n\t\tself.nameCache = { }\n\t\tself.pngname = \"\"\n\n\tGUI_WIDGET = ePixmap\n\n\tdef changed(self, what):\n\t\tif self.instance:\n\t\t\tpngname = \"\"\n\t\t\tif what[0] != self.CHANGED_CLEAR:\n\t\t\t\tservice = self.source.service\n\t\t\t\tmarker = (service.flags & eServiceReference.isMarker == eServiceReference.isMarker)\n\t\t\t\tbouquet = (service.flags & eServiceReference.flagDirectory == eServiceReference.flagDirectory)\n\t\t\t\tif marker:\n\t\t\t\t\tpngname = self.nameCache.get(\"marker\", \"\")\n\t\t\t\t\tif pngname == \"\":\n\t\t\t\t\t\ttmp = resolveFilename(SCOPE_CURRENT_SKIN, \"marker.png\")\n\t\t\t\t\t\tif fileExists(tmp):\n\t\t\t\t\t\t\tpngname = tmp\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpngname = resolveFilename(SCOPE_SKIN_IMAGE, \"skin_default/picon_default.png\")\n\t\t\t\t\t\tself.nameCache[\"marker\"] = pngname\n\t\t\t\telif bouquet:\n\t\t\t\t\tpngname = self.nameCache.get(\"bouquet\", \"\")\n\t\t\t\t\tif pngname == \"\":\n\t\t\t\t\t\ttmp = resolveFilename(SCOPE_CURRENT_SKIN, \"bouquet.png\")\n\t\t\t\t\t\tif fileExists(tmp):\n\t\t\t\t\t\t\tpngname = tmp\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpngname = resolveFilename(SCOPE_SKIN_IMAGE, \"skin_default/picon_default.png\")\n\t\t\t\t\t\tself.nameCache[\"bouquet\"] = pngname\n\t\t\t\telse:\n\t\t\t\t\tsname = service.toString()\n\t\t\t\t\tif sname is not None and sname != \"\":\n\t\t\t\t\t\tif '4097:0' in sname:\n\t\t\t\t\t\t\tsname = sname.replace('rtmpe','').replace('4097:0:1','').replace('4097:0:2','').replace('4097:0:0','')\n\t\t\t\t\t\t\tsname = sname.replace('1:0:1:1:1:0:820000:0:0:0:http%3a//127.0.0.1%3a4050/rtp/','').replace('%3a','').replace('.','').replace(' ','').replace(':0:0:0:0:0:0:0:','').replace('/','').replace('http','').replace('rtmp','').replace('rtsp','')\n\t\t\t\t\t\tpos = sname.rfind(':')\n\t\t\t\t\t\tif pos != -1:\n\t\t\t\t\t\t\tsname = sname[:pos].rstrip(':').replace(':','_')\n\t\t\t\t\t\tpngname = self.nameCache.get(sname, \"\")\n\t\t\t\t\t\tif pngname == \"\":\n\t\t\t\t\t\t\tpngname = self.findPicon(sname)\n\t\t\t\t\t\t\tif pngname != \"\":\n\t\t\t\t\t\t\t\tself.nameCache[sname] = pngname\n\t\t\tif pngname == \"\":\n\t\t\t\tpngname = self.nameCache.get(\"default\", \"\")\n\t\t\t\tif pngname == 
\"\":\n\t\t\t\t\tpngname = self.findPicon(\"picon_default\")\n\t\t\t\t\tif pngname == \"\":\n\t\t\t\t\t\ttmp = resolveFilename(SCOPE_CURRENT_SKIN, \"picon_default.png\")\n\t\t\t\t\t\tif fileExists(tmp):\n\t\t\t\t\t\t\tpngname = tmp\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpngname = resolveFilename(SCOPE_SKIN_IMAGE, \"skin_default/picon_default.png\")\n\t\t\t\t\tself.nameCache[\"default\"] = pngname\n\t\t\tif self.pngname != pngname:\n\t\t\t\tself.pngname = pngname\n\t\t\t\tself.instance.setPixmapFromFile(self.pngname)\n \t\t\n\tdef findPicon(self, serviceName):\n\t\ttry:\n\t\t\tpngname = config.plugins.setupGlass16.par39.value + \"/picon/\" + serviceName + \".png\"\n\t\t\tif fileExists(pngname):\n\t\t\t\treturn pngname\n\t\texcept: pass\n\t\treturn \"\"\n","repo_name":"bruco72/Metropolis2HD","sub_path":"usr/lib/enigma2/python/Components/Renderer/g16RefTV.py","file_name":"g16RefTV.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6824170273","text":"import datetime\nfrom db_helper import WordDetail\nfrom typing import List\nimport random\n\n\nclass PageGenerator:\n def __init__(self, words: List[WordDetail], date: datetime.date = None):\n if date:\n self.date = date\n delta = 0\n else:\n self.date = datetime.datetime.now(\n datetime.timezone(datetime.timedelta(hours=8)))\n delta = 1 if self.date.time() < datetime.time(4, 0, 0) else 0\n\n self.timestamp = '{:%Y%m%d}'.format(self.date.date() - datetime.timedelta(delta))\n\n self.words = words\n random_int = random.randint(0, 2 ** 16)\n self.interpretation_class = '{}_{}'.format('interpretation', str(random_int))\n self.details_class = '{}_{}'.format('details', str(random_int))\n\n def gen_md(self, output_path=None):\n output_path = output_path or '{}.md'.format(self.timestamp)\n content = ''.join([self._gen_head(), self._gen_body()])\n style = \"\"\"\n\"\"\"\n script = (\"\"\"\n\\n\"\"\")\n content = style + content + script\n with open(output_path, 'w') as f:\n f.write(content)\n return output_path, content\n\n def _gen_head(self):\n date_str = '{} [{}]'.format(self.timestamp, len(self.words))\n return f\"# {date_str} \\n\"\n\n def _gen_body(self):\n items = [self._gen_item_html(i) for i in self.words]\n\n return '\\n'.join(items)\n\n def _gen_item_html(self, word: WordDetail, highlight_word=True):\n phrase_list = word.phrase_list\n if not phrase_list:\n return f'Not found the phrase of:{word.word} \\n'\n phrase_choose = max(phrase_list, key=lambda x: len(x[0]))\n en, ch = phrase_choose\n en = en.replace(word.word, f'{word.word}')\n i = \"\"\"\n
<div>\n    <div>{word}</div>\n    <div>{phonetic}</div>\n    <div class=\"{inter_class}\">{interpretation}</div>\n    <div class=\"{detail_class}\">\n        {en}\n        {ch}\n    </div>\n</div>
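<!-- {inter_class} and {detail_class} are the per-page randomized class names built in __init__; the style and script blocks above presumably target them to toggle visibility of the interpretation and detail sections -->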
\n\"\"\".format(word=word.word, en=en, ch=ch,\n phonetic=word.pronunciation, interpretation=word.interpretation,\n inter_class=self.interpretation_class, detail_class=self.details_class)\n return i\n","repo_name":"JokinYang/MomoReviewHelper","sub_path":"page_maker.py","file_name":"page_maker.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"2971942156","text":"\ndef binary_search(nums, x):\n\n bottom, top = 0, len(nums) - 1\n\n while bottom <= top:\n\n middle = (top + bottom) // 2\n\n if nums[middle] == x:\n return middle\n\n elif nums[middle] < x:\n bottom = middle + 1\n\n else:\n top = middle - 1\n\n return -1\n\ndef binary_search2(nums, x, bottom=None, top=None):\n\n if bottom == None or top == None:\n return binary_search2(nums, x, 0, len(nums) - 1)\n\n elif top >= bottom:\n\n middle = (top + bottom) // 2\n\n if nums[middle] == x:\n return middle\n\n elif nums[middle] < x:\n return binary_search2(nums, x, middle + 1, top)\n\n return binary_search2(nums, x, bottom, middle - 1)\n\n return -1\n\ndef main():\n\n nums = sorted([3, 5, 657, 23, 5, 9, 2, 54, 22, 10, 14, 20, 42, 32, 15, 11])\n \n print(binary_search(nums, 10))\n print(\"------\")\n print(binary_search2(nums, 10))\n\nif __name__ == '__main__':\n main()\n","repo_name":"sshh12/SchoolCode","sub_path":"Algorithms/SortingSeaching/BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"22739974827","text":"# -*- coding:utf-8 -*-\n\n# 数据集:《机器学习》--西瓜数据集4.0\n\n# 算法流程:《机器学习》--k均值算法 图9.2\n\n\nimport math\nimport numpy as np\nimport pylab as pl\n\n# 数据集:每三个是一组分别是西瓜的编号,密度,含糖量\nintialData = \"\"\"\n1,0.697,0.46,2,0.774,0.376,3,0.634,0.264,4,0.608,0.318,5,0.556,0.215,\n6,0.403,0.237,7,0.481,0.149,8,0.437,0.211,9,0.666,0.091,10,0.243,0.267,\n11,0.245,0.057,12,0.343,0.099,13,0.639,0.161,14,0.657,0.198,15,0.36,0.37,\n16,0.593,0.042,17,0.719,0.103,18,0.359,0.188,19,0.339,0.241,20,0.282,0.257,\n21,0.748,0.232,22,0.714,0.346,23,0.483,0.312,24,0.478,0.437,25,0.525,0.369,\n26,0.751,0.489,27,0.532,0.472,28,0.473,0.376,29,0.725,0.445,30,0.446,0.459\"\"\"\n\n\n# 定义一个西瓜类,三个属性,分别是编号,密度,含糖率\n\nclass Watermelon:\n def __init__(self, properties):\n self.number = properties[0]\n self.density = float(properties[1])\n self.sweet = float(properties[2])\n\n\n# 数据处理\n\n_initalData = intialData.split(',') # 返回一个列表 例:“A,B,C”.split(\",\")结果为['A','B','C'] 即将一整个字符串根据所给的分隔符分割成多个字符串再放成列表类型返回\n_dataSet = []\nfor i in range(int(len(_initalData) / 3)): # 将data中的数据按三个数据一行划分\n _tupleData = tuple(_initalData[i * 3: i * 3 + 3])\n _dataSet.append(Watermelon(_tupleData)) # 将元祖数据定义为西瓜类放入数据集\n\n\n# 计算欧几里得距离,_firstTuple,_secondTuple分别为两个元组\ndef dist(_firsttuple, _secondtuple):\n # 每一个样本是有两个属性“密度”“含糖率”的二维向量\n return math.sqrt(math.pow(_firsttuple[0] - _secondtuple[0], 2) + math.pow(_firsttuple[1] - _secondtuple[1], 2))\n\n\n# k均值算法模型\ndef k_means(k, dataset, max_iter):\n _meanVector = np.random.choice(dataset, k) # choice从序列中随机选择k个样本作为均值向量 例random.choice([1,2,3,4,5,6,7,8,9])\n _meanVector = [(watermelon.density, watermelon.sweet) for watermelon in _meanVector] # 均值向量列表\n _categoryList = [[] for i in range(k)] # 初始化分类列表\n meanVector_update = [] # 均值向量更新列表\n while max_iter > 0:\n # 分类\n for i in dataset:\n temp = np.argmin([dist((i.density, i.sweet), _meanVector[j]) for j in range(len(_meanVector))])\n _categoryList[temp].append(i)\n # 更新均值向量\n 
for i in range(k):\n ui_density = 0.0\n ui_sweet = 0.0\n for j in _categoryList[i]:\n ui_density += j.density\n ui_sweet += j.sweet\n meanVector_update.append((ui_density / len(_categoryList[i]), ui_sweet / len(_categoryList[i])))\n # 每五次输出一次分类图\n if max_iter % 5 == 0:\n draw(_categoryList, _meanVector)\n # 比较U和U_update,如果相同则算法停止,得到最终的簇划分\n if _meanVector == meanVector_update:\n break\n _meanVector = meanVector_update\n meanVector_update = []\n _categoryList = [[] for i in range(k)]\n max_iter -= 1\n\n return _categoryList, _meanVector\n\n\n# 画图\ndef draw(_cL, _mV):\n colValue = ['r', 'y', 'g', 'b', 'c', 'k', 'm']\n for i in range(len(_cL)):\n coo_X = [] # x坐标列表\n coo_Y = [] # y坐标列表\n for j in range(len(_cL[i])):\n coo_X.append(_cL[i][j].density)\n coo_Y.append(_cL[i][j].sweet)\n pl.scatter(coo_X, coo_Y, marker='.', color=colValue[i % len(_cL)], s=80) # , label=str(i)\n # 展示均值向量\n u_x = []\n u_y = []\n for i in _mV:\n u_x.append(i[0])\n u_y.append(i[1])\n pl.scatter(u_x, u_y, marker='+', color=colValue[6], s=80) # , label=\"avg_vector\"\n # pl.legend(loc='upper right')\n pl.show()\n\n\ncategoryList, meanVector = k_means(3, _dataSet, 30)\ndraw(categoryList, meanVector)\n","repo_name":"Gchenyu/Machine-learning","sub_path":"k-means.py","file_name":"k-means.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37554724729","text":"import os\r\nimport copy\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset\r\nimport torchvision.transforms as transforms\r\nimport sys\r\nimport vgi\r\nfrom vgi.ct import FanRec\r\n\r\ndef fanFlatFBP(rec_shape, sino_shape):\r\n # Reconstruction configuration.\r\n n_angles, n_detectors = sino_shape\r\n det_width = 1.0\r\n source_origin = 512.\r\n origin_det = 512.\r\n ang_range = np.pi * 2\r\n rec_angles = np.linspace(0, ang_range, num = n_angles, endpoint = False)\r\n fbp = FanRec(rec_shape = rec_shape, \r\n sino_shape = sino_shape, \r\n angles = rec_angles,\r\n det_width = det_width,\r\n source_origin = source_origin,\r\n origin_det = origin_det) \r\n return fbp \r\n\r\n# Each dataset should be an npy file with the shape (images, height, width)\r\n# For sino(720, 768)\r\n# patch_layout = (9, 8)\r\n# patch_size = (80, 96) /16 => (5, 6)\r\n# over_lap = patch_size / 2 = (40, 48)\r\n# For reconstruction(512, 512)\r\n# patch_layout = (8, 8)\r\n# patch_size = (64, 64) /16 => (4, 4)\r\n# over_lap = patch_size / 2 = (32, 32)\r\nclass CtDataset(Dataset):\r\n def __init__(self, input_dir, target_dir, \r\n patch_layout = (9, 8),\r\n normalize = False,\r\n device = None,\r\n mem = True,\r\n np_dtype = np.float32,\r\n torch_dtype = torch.float32):\r\n if device is None:\r\n self.device = torch.device(\"cpu\")\r\n else:\r\n self.device = device\r\n self.np_dtype = np_dtype\r\n self.torch_dtype = torch_dtype\r\n\r\n self.input_dir = input_dir\r\n self.target_dir = target_dir\r\n self.filenames = [] \r\n\r\n self.data_shape = [0, 0]\r\n self.n_images = 0\r\n paths_init = vgi.getFiles(self.input_dir)\r\n for path in paths_init:\r\n _, filename, extname = vgi.parsePath(path)\r\n if extname == '.npy':\r\n self.filenames += [filename]\r\n if self.n_images == 0:\r\n data = np.load(path)\r\n self.n_images, self.data_shape[0], self.data_shape[1] = data.shape \r\n \r\n n_dims = len(self.data_shape) \r\n self.patch_layout = patch_layout \r\n self.patch_size = [self.data_shape[i] // self.patch_layout[i] for i in range(n_dims)]\r\n self.overlap_shape = 
[self.patch_size[i] // 2 for i in range(n_dims)]\r\n loc = [list(range(0, self.data_shape[i] - self.overlap_shape[i], self.overlap_shape[i])) for i in range(n_dims) ]\r\n self.patch_ranges = np.array([[r, r + self.patch_size[0], c, c + self.patch_size[1]] for r in loc[0] for c in loc[1]])\r\n self.n_patches = self.patch_ranges.shape[0]\r\n self.overlap_count = np.zeros(self.data_shape)\r\n for row, row_e, col, col_e in self.patch_ranges:\r\n self.overlap_count[row: row_e, col: col_e] += 1.0\r\n\r\n L = [['', i, p] for i in range(self.n_images) for p in range(self.n_patches)]\r\n self.idx = []\r\n for filename in self.filenames:\r\n Li = copy.deepcopy(L)\r\n for t in Li:\r\n t[0] = filename\r\n self.idx += Li\r\n self.n = len(self.idx)\r\n\r\n self.mem = mem\r\n self.input_file = None\r\n self.target_file = None\r\n self.filename = None\r\n self.normalize = normalize \r\n self.input_dataset = {}\r\n self.target_dataset = {}\r\n self.input_vranges = {}\r\n self.target_vranges = {}\r\n if self.mem:\r\n self.loadMem()\r\n\r\n # CtDataset::__init__\r\n\r\n def loadMem(self): \r\n for filename in self.filenames:\r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n input_data= np.load(input_path).astype(dtype = self.np_dtype)\r\n target_data = None\r\n if not (self.target_dir is None):\r\n target_path = self.target_dir + npy_name\r\n target_data = np.load(target_path).astype(dtype = self.np_dtype) \r\n if self.normalize:\r\n input_vr = (np.min(input_data), np.max(input_data))\r\n self.input_vranges[filename] = (input_vr)\r\n if not (self.target_dir is None):\r\n input_data = vgi.normalize(input_data) \r\n target_vr = (np.min(target_data), np.max(target_data))\r\n self.target_vranges[filename] = (target_vr) \r\n target_data = vgi.normalize(target_data) \r\n\r\n self.input_dataset[filename] = input_data\r\n self.target_dataset[filename] = target_data \r\n # CtDataset::loadMem\r\n\r\n\r\n def __len__(self):\r\n return self.n\r\n\r\n def item(self, idx):\r\n filename, image_id, patch_id = self.idx[idx]\r\n if filename != self.filename:\r\n if self.mem:\r\n self.input_file = self.input_dataset[filename]\r\n self.target_file = self.target_dataset[filename] \r\n else: \r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n self.input_file = np.load(input_path).astype(dtype = self.np_dtype)\r\n if self.normalize:\r\n self.input_file = vgi.normalize(self.input_file)\r\n self.target_file = None\r\n if not(self.target_dir is None):\r\n target_path = self.target_dir + npy_name\r\n self.target_file = np.load(target_path).astype(dtype = self.np_dtype) \r\n if self.normalize:\r\n self.target_file = vgi.normalize(self.target_file) \r\n self.filename = filename \r\n\r\n row, row_e, col, col_e = self.patch_ranges[patch_id] \r\n\r\n input_data = self.input_file[image_id, row:row_e, col:col_e]\r\n input_data = torch.tensor(input_data, dtype = self.torch_dtype, device = self.device)\r\n input_data = input_data.unsqueeze(0)\r\n\r\n target_data = None\r\n if not(self.target_dir is None):\r\n target_data = self.target_file[image_id, row:row_e, col:col_e]\r\n target_data = torch.tensor(target_data, dtype = self.torch_dtype, device = self.device)\r\n target_data = target_data.unsqueeze(0)\r\n\r\n return input_data, target_data, filename, image_id, patch_id \r\n # CtDataset::item \r\n\r\n def __getitem__(self, idx):\r\n input_data, target_data, filename, image_id, patch_id = self.item(idx)\r\n return input_data, target_data\r\n\r\n def volume(self, filename, image_id = 
None, patch_id = None, target = True):\r\n if self.mem:\r\n self.input_file = self.input_dataset[filename]\r\n self.target_file = self.target_dataset[filename]\r\n else:\r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n self.input_file = np.load(input_path).astype(dtype = self.np_dtype)\r\n if self.normalize:\r\n self.input_file = vgi.normalize(self.input_file)\r\n\r\n self.target_file = None\r\n if not(self.target_dir is None):\r\n target_path = self.target_dir + npy_name \r\n self.target_file = np.load(target_path).astype(dtype = self.np_dtype) \r\n if self.normalize:\r\n self.target_file = vgi.normalize(self.target_file) \r\n\r\n self.filename = filename \r\n \r\n input_data = torch.tensor(self.input_file, dtype = self.torch_dtype, device = self.device)\r\n input_data = input_data.unsqueeze(1)\r\n if target and not(self.target_dir is None):\r\n target_data = torch.tensor(self.target_file, dtype = self.torch_dtype, device = self.device)\r\n target_data = target_data.unsqueeze(1) \r\n else:\r\n target_data = None\r\n \r\n if image_id is None:\r\n if target and not(self.target_dir is None):\r\n return input_data, target_data \r\n else: \r\n return input_data \r\n elif patch_id is None:\r\n if target and not(self.target_dir is None):\r\n return input_data[image_id].unsqueeze(0), target_data[image_id].unsqueeze(0)\r\n else: \r\n return input_data[image_id].unsqueeze(0) \r\n else:\r\n row, row_e, col, col_e = self.patch_ranges[patch_id] \r\n if target and not(self.target_dir is None): \r\n return input_data[image_id, :, row:row_e, col:col_e].unsqueeze(0), target_data[image_id, :, row:row_e, col:col_e].unsqueeze(0) \r\n else: \r\n return input_data[image_id, :, row:row_e, col:col_e].unsqueeze(0) \r\n # CtDataset::volume \r\n\r\n def rawdata(self, filename, target = True):\r\n if self.mem:\r\n self.input_file = self.input_dataset[filename]\r\n self.target_file = self.target_dataset[filename]\r\n else:\r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n self.input_file = np.load(input_path).astype(dtype = self.np_dtype)\r\n if self.normalize:\r\n self.input_file = vgi.normalize(self.input_file)\r\n\r\n self.target_file = None\r\n if not(self.target_dir is None):\r\n target_path = self.target_dir + npy_name \r\n self.target_file = np.load(target_path).astype(dtype = self.np_dtype) \r\n if self.normalize:\r\n self.target_file = vgi.normalize(self.target_file) \r\n\r\n self.filename = filename \r\n if target and not(self.target_dir is None):\r\n return self.input_file, self.target_file \r\n else: \r\n return self.input_file \r\n \r\n # CtDataset::rawdata \r\n\r\n\r\n # For model output\r\n def loadInputPatches(self, file_id, image_s, patch_id, image_e = None, target = False):\r\n filename = self.filenames[file_id]\r\n if self.filename != filename:\r\n if self.mem:\r\n self.input_file = self.input_dataset[filename]\r\n self.target_file = self.target_dataset[filename]\r\n else:\r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n self.input_file = np.load(input_path).astype(dtype = self.np_dtype)\r\n if self.normalize:\r\n self.input_file = vgi.normalize(self.input_file)\r\n\r\n self.target_file = None\r\n if not(self.target_dir is None):\r\n target_path = self.target_dir + npy_name \r\n self.target_file = np.load(target_path).astype(dtype = self.np_dtype) \r\n if self.normalize:\r\n self.target_file = vgi.normalize(self.target_file)\r\n self.filename = filename\r\n if image_e is None:\r\n image_e = image_s + 1\r\n 
row, row_e, col, col_e = self.patch_ranges[patch_id]\r\n \r\n input_images = self.input_file[image_s:image_e] #[n, h, w]\r\n input_patches = input_images[..., row:row_e, col:col_e] #[n, h, w]\r\n _input_patches = torch.tensor(input_patches, dtype = self.torch_dtype, device = self.device)\r\n _input_patches = _input_patches.unsqueeze(1) #[n, 1, h, w]\r\n if target == False or (self.target_dir is None):\r\n return _input_patches\r\n\r\n target_images = self.target_file[image_s:image_e] #[n, h, w]\r\n target_patches = target_images[..., row:row_e, col:col_e] #[n, h, w]\r\n _target_patches = torch.tensor(target_patches, dtype = self.torch_dtype, device = self.device)\r\n _target_patches = _target_patches.unsqueeze(1) #[n, 1, h, w]\r\n return _input_patches, _target_patches\r\n # CtDataset::loadInputPatches \r\n\r\n\r\n def feed(self, model, file_id, image_id, batch_size = 1, target = False):\r\n out_shape = [batch_size] + self.data_shape\r\n outputs = np.zeros(out_shape, dtype = self.np_dtype)\r\n for patch_id in range(self.n_patches):\r\n _input_patches = self.loadInputPatches(file_id = file_id, patch_id = patch_id,\r\n image_s = image_id, image_e = image_id + batch_size, \r\n target = target)\r\n if target and not(self.target_dir is None):\r\n _target_patches = _input_patches[1]\r\n _input_patches = _input_patches[0]\r\n if model is None:\r\n _output_patches = _input_patches\r\n else:\r\n _output_patches = model(_input_patches)\r\n output_patches = vgi.toNumpy(_output_patches.squeeze()).astype(self.np_dtype)\r\n row, row_e, col, col_e = self.patch_ranges[patch_id]\r\n outputs[:, row: row_e, col: col_e] += output_patches \r\n outputs /= self.overlap_count \r\n if target:\r\n return outputs, vgi.toNumpy(_target_patches.squeeze()) \r\n else:\r\n return outputs\r\n # CtDataset::feed\r\n\r\n#@ CtDataSet \r\n\r\n\r\nclass SinoDataset(Dataset):\r\n def __init__(self, input_dir,\r\n intervals = [8, 4, 2], \r\n device = None,\r\n mem = True,\r\n np_dtype = np.float32,\r\n torch_dtype = torch.float32):\r\n if device is None:\r\n self.device = torch.device(\"cpu\")\r\n else:\r\n self.device = device\r\n self.np_dtype = np_dtype\r\n self.torch_dtype = torch_dtype\r\n\r\n self.input_dir = input_dir\r\n self.filenames = [] \r\n\r\n self.sino_shape = [0, 0]\r\n self.n_images = 0\r\n self.n_angles = 0\r\n self.n_detectors = 0\r\n paths_init = vgi.getFiles(self.input_dir)\r\n for path in paths_init:\r\n _, filename, extname = vgi.parsePath(path)\r\n if extname == '.npy':\r\n self.filenames += [filename]\r\n if self.n_images == 0:\r\n data = np.load(path)\r\n self.n_images, self.n_angles, self.n_detectors = data.shape \r\n self.sino_shape = (self.n_angles, self.n_detectors)\r\n \r\n\r\n self.intervals = intervals\r\n L = [] \r\n for i in range(self.n_images): \r\n for d in self.intervals:\r\n offset = d // 2\r\n for k in range(self.n_angles): \r\n L += [['', i, k, (k + d) % self.n_angles, (k + offset) % self.n_angles]]\r\n\r\n self.idx = []\r\n for filename in self.filenames:\r\n Li = copy.deepcopy(L)\r\n for t in Li:\r\n t[0] = filename\r\n self.idx += Li\r\n self.n = len(self.idx)\r\n\r\n self.pred_idx = []\r\n for d in self.intervals:\r\n n_angles_d = self.n_angles // d\r\n offset = d // 2\r\n Ld = []\r\n for k in range(n_angles_d):\r\n s = k * d\r\n Ld += [[s, (s + d) % self.n_angles, (s + offset) % self.n_angles ]] # [[sino_i, sino_j, sino_k]]\r\n #print(s, (s + d) % n_angles, s + offset)\r\n self.pred_idx += [Ld] \r\n\r\n self.mem = mem\r\n self.input_file = None\r\n self.filename = None \r\n 
self.input_dataset = {}\r\n if self.mem:\r\n self.loadMem()\r\n\r\n # SinoDataset::__init__\r\n\r\n def loadMem(self): \r\n for filename in self.filenames:\r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n input_data= np.load(input_path).astype(dtype = self.np_dtype)\r\n self.input_dataset[filename] = input_data\r\n # SinoDataset::loadMem\r\n\r\n\r\n def __len__(self):\r\n return self.n\r\n\r\n def loadData(self, filename):\r\n if filename != self.filename:\r\n if self.mem:\r\n self.input_file = self.input_dataset[filename]\r\n else: \r\n npy_name = filename + '.npy'\r\n input_path = self.input_dir + npy_name\r\n self.input_file = np.load(input_path).astype(dtype = self.np_dtype) \r\n self.filename = filename \r\n return self.input_file \r\n # SinoDataset::loadData\r\n\r\n def item(self, idx):\r\n filename, image_id, sino_i, sino_j, sino_k = self.idx[idx]\r\n data = self.loadData(filename)\r\n input_data = np.concatenate( \r\n [np.expand_dims(data[image_id, sino_i, :], 0),\r\n np.expand_dims(data[image_id, sino_j, :], 0)])\r\n target_data = np.expand_dims(data[image_id, sino_k, :], 0)\r\n\r\n input_data = torch.tensor(input_data, dtype = self.torch_dtype, device = self.device)\r\n target_data = torch.tensor(target_data, dtype = self.torch_dtype, device = self.device)\r\n input_data = input_data.unsqueeze(0)\r\n target_data = target_data.unsqueeze(0)\r\n\r\n return input_data, target_data, filename, image_id, sino_i, sino_j, sino_k \r\n # SinoDataset::item \r\n\r\n def __getitem__(self, idx):\r\n input_data, target_data, filename, image_id, sino_i, sino_j, sino_k = self.item(idx)\r\n return input_data, target_data\r\n\r\n \r\n\r\n # For model output\r\n def loadBatch(self, image_id, sino_i, sino_j, batch_size = 1, file_id = None, data = None, _tensor = True):\r\n if data is None:\r\n data = self.loadData(self.filenames[file_id])\r\n\r\n image_e = (image_id + batch_size)\r\n image_e = image_e if image_e < self.n_images else self.n_images\r\n \r\n Ai = data[image_id:image_e, sino_i, :]\r\n Aj = data[image_id:image_e, sino_j, :]\r\n Ai = np.expand_dims(Ai, 1)\r\n Aj = np.expand_dims(Aj, 1)\r\n B = np.concatenate([Ai, Aj], axis = 1)\r\n B = np.expand_dims(B, 1) \r\n if _tensor:\r\n return torch.tensor(B, dtype = self.torch_dtype, device = self.device)\r\n else: \r\n return B\r\n # SinoDataset::loadBatch \r\n\r\n\r\n def feed(self, model, file_id, image_id, batch_size = 1):\r\n out_shape = [batch_size] + list(self.sino_shape) # (batch_size, n_angles, n_detectors)\r\n data = self.loadData(self.filenames[file_id])\r\n outputs = np.zeros(out_shape, dtype = self.np_dtype)\r\n\r\n #print('SinoDataset::feed::data', data.shape)\r\n #print('SinoDataset::feed::outputs', outputs.shape)\r\n\r\n # initializing\r\n for sino_i, sino_j, sino_k in self.pred_idx[0]:\r\n _input = self.loadBatch(data = data, image_id = image_id, batch_size = batch_size, \r\n sino_i = sino_i, sino_j = sino_j, _tensor = True) \r\n input_data = vgi.toNumpy(_input.squeeze(axis = 1))\r\n outputs[:, sino_i, :] = input_data[:, 0, :] \r\n outputs[:, sino_j, :] = input_data[:, 1, :] \r\n\r\n # Interpolating\r\n for lv_idx in self.pred_idx:\r\n for sino_i, sino_j, sino_k in lv_idx:\r\n _input = self.loadBatch(data = outputs, image_id = 0, batch_size = batch_size, \r\n sino_i = sino_i, sino_j = sino_j, _tensor = True)\r\n # (batch_size, 1, 2, n_detectors)\r\n #print('SinoDataset::feed::_input', _input.shape)\r\n\r\n _output = model(_input).squeeze(axis = 1) # (batch_size, 1, n_detectors)\r\n output = 
vgi.toNumpy(_output).astype(self.np_dtype) # (batch_size, 1, n_detectors)\r\n outputs[:, sino_k, :] = output \r\n\r\n return outputs\r\n # SinoDataSet::feed\r\n\r\n#@ SinoDataSet ","repo_name":"jameschengcs/arusvct","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":21393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74426322001","text":"import os\nimport re\n\nfrom cliboa.adapter.ftp import FtpAdapter\nfrom cliboa.scenario.ftp import BaseFtp\nfrom cliboa.scenario.validator import EssentialParameters\nfrom cliboa.util.cache import ObjectStore\nfrom cliboa.util.constant import StepStatus\n\n\nclass FtpExtract(BaseFtp):\n def __init__(self):\n super().__init__()\n\n\nclass FtpDownload(FtpExtract):\n \"\"\"\n Download files from ftp server\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._quit = False\n\n def quit(self, quit):\n self._quit = quit\n\n def execute(self, *args):\n # essential parameters check\n valid = EssentialParameters(\n self.__class__.__name__,\n [self._host, self._user, self._src_dir, self._src_pattern],\n )\n valid()\n\n os.makedirs(self._dest_dir, exist_ok=True)\n\n obj = FtpAdapter().list_files(\n dir=self._src_dir,\n dest=self._dest_dir,\n pattern=re.compile(self._src_pattern),\n )\n\n adaptor = super().get_adaptor()\n files = adaptor.execute(obj)\n\n if self._quit is True and len(files) == 0:\n self._logger.info(\"No file was found. After process will not be processed\")\n return StepStatus.SUCCESSFUL_TERMINATION\n\n # cache downloaded file names\n ObjectStore.put(self._step, files)\n\n\nclass FtpDownloadFileDelete(FtpExtract):\n \"\"\"\n Delete all downloaded files.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def execute(self, *args):\n files = ObjectStore.get(self._symbol)\n\n if files is not None and len(files) > 0:\n self._logger.info(\"Delete files %s\" % files)\n\n self._host = super().get_step_argument(\"host\")\n self._user = super().get_step_argument(\"user\")\n self._password = super().get_step_argument(\"password\")\n self._timeout = super().get_step_argument(\"timeout\")\n self._retry_count = super().get_step_argument(\"retry_count\")\n self._port = super().get_step_argument(\"port\")\n self._tls = super().get_step_argument(\"tls\")\n\n adaptor = super().get_adaptor()\n for file in files:\n obj = FtpAdapter().remove_specific_file(\n dir=self._src_dir,\n fname=file,\n )\n adaptor.execute(obj)\n self._logger.info(\"%s is successfully deleted.\" % file)\n else:\n self._logger.info(\"No files to delete.\")\n","repo_name":"BrainPad/cliboa","sub_path":"cliboa/scenario/extract/ftp.py","file_name":"ftp.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"1682837741","text":"\"\"\"\nWSGI config for VOSQA project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/\n\"\"\"\n\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\nosqa_dir = current_dir[:current_dir.rfind('/')]\nparent_dir = current_dir[:osqa_dir.rfind('/')]\n\nsys.path.append(parent_dir)\nsys.path.append(osqa_dir)\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"vosqa.settings\")\n\nfrom django.core.wsgi import get_wsgi_application\napplication = 
get_wsgi_application()\n","repo_name":"konrado0/vosqa","sub_path":"conf/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14183429143","text":"# !/usr/bin/python\n# coding=utf-8\n\nfrom __future__ import (absolute_import, division, print_function, unicode_literals)\n\nimport unittest\nfrom unittest import TestCase\n\nfrom ares_util.ares import get_czech_zip_code, guess_czech_street_from_full_text_address, build_city\n\n\nclass ZIPCodeTestCase(TestCase):\n def test_get_czech_zip_code(self):\n expected = \"4200\"\n actual = get_czech_zip_code(ares_data=\"\", full_text_address=\"U obchodního rejstříku 15, Praha, PSČ 4200\")\n self.assertEqual(expected, actual)\n\n expected = \"1111\"\n actual = get_czech_zip_code(ares_data=\"1111\", full_text_address=\"U obchodního rejstříku 15, Praha, PSČ 2222\")\n self.assertEqual(expected, actual)\n\n @unittest.skip(\"This will be fixed in the next version\")\n def test_get_czech_zip_code_1(self):\n expected = \"27704\"\n actual = get_czech_zip_code(ares_data=\"\", full_text_address=\"Daminěves 35, 277 04 Cítov\")\n\n self.assertEqual(expected, actual)\n\n\nclass CzechStreetTestCase(TestCase):\n def test_get_street(self):\n full_text = \"Praha, U Pythonisty 42/36, PSČ 4200\"\n expected = \"U Pythonisty 42/36\"\n\n actual = guess_czech_street_from_full_text_address(full_text)\n\n self.assertEqual(expected, actual)\n\n @unittest.skip(\"This will be fixed in the next version\")\n def test_get_street_reversed(self):\n full_text = \"Daminěves 35, 277 04 Cítov\"\n expected = \"Daminěves 35\"\n\n actual = guess_czech_street_from_full_text_address(full_text)\n\n self.assertEqual(expected, actual)\n\n\nclass CityTestCase(TestCase):\n @unittest.skip(\"This will be fixed in the next version\")\n def test_get_czech_zip_code(self):\n full_text = \"Daminěves 35, 277 04 Cítov\"\n expected = \"Cítov\"\n\n actual = build_city(city=None, address=full_text)\n\n self.assertEqual(expected, actual)\n","repo_name":"illagrenan/ares_util","sub_path":"tests/parser_test.py","file_name":"parser_test.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"32216912077","text":"import os\n\n\n\n\nfor line in open(\"/gpu-data/filby/BoLD/BOLD_public/annotations/train.csv\").readlines():\n video_path = os.path.join(\"/gpu-data/filby/BoLD/BOLD_public/videos\", line.split(\",\")[0])\n print(video_path)\n DIR_OPENPOSE = video_path + \"_openface\"\n if not os.path.exists(DIR_OPENPOSE):\n os.mkdir(DIR_OPENPOSE)\n\n command = \"./FaceLandmarkVidMulti -f {video} -out_dir {DIR_OPENPOSE} -simsize 300\"\n c = command.format(video=video_path, DIR_OPENPOSE=DIR_OPENPOSE)\n os.system(c)\n # raise\n# print(c)\n","repo_name":"filby89/incremental-learning-CRI","sub_path":"utils/apply_openface_to_bold.py","file_name":"apply_openface_to_bold.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"13128733946","text":"import sys\n\ndef create_table(file1, file2):\n table = {}\n with open(file1, 'r') as f1, open(file2, 'r') as f2:\n for line1, line2 in zip(f1, f2):\n header, classification = line1.strip().split('\\t')\n pi = line2.strip().split('\\t')[2]\n table[header] = (classification, pi)\n\n return table\n\ndef write_table(table, output_file):\n with open(output_file, 'w') as f:\n for 
header, values in table.items():\n classification, pi = values\n f.write(f\"{header}\\t{classification}\\t{pi}\\n\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 4:\n print(\"Usage: python3 script.py \")\n sys.exit(1)\n\n file1 = sys.argv[1]\n file2 = sys.argv[2]\n output_file = sys.argv[3]\n\n table = create_table(file1, file2)\n write_table(table, output_file)\n","repo_name":"aleysary/Appalachian-Laboratory-SummerIntern2023","sub_path":"ComparisonP.py","file_name":"ComparisonP.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40621313968","text":"import logging\r\nimport platform\r\n\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QStyle, QFrame, QProgressBar\r\n\r\n#from JSONFileDialog import JSONFileDialog\r\nfrom JFileDialog import JFileDialog\r\nfrom ImportHandler import ImportThread\r\n\r\nclass ImportManager(QWidget):\r\n def __init__(self, parent):\r\n logging.debug(\"ImportManager(): Instantiated\")\r\n super(ImportManager, self).__init__(parent)\r\n mainlayout = QVBoxLayout(self)\r\n layout1 = QHBoxLayout()\r\n layout2 = QHBoxLayout()\r\n\r\n import_zip_label = QLabel(\"Select ZIP file to import: \")\r\n import_zip_label.setFont(QtGui.QFont(\"Times\", weight = QtGui.QFont.Bold))\r\n import_zip_label.setAlignment(Qt.AlignCenter)\r\n\r\n self.zip_le = QLineEdit()\r\n self.zip_le.setFont(QtGui.QFont(\"Times\", weight=QtGui.QFont.Bold))\r\n self.zip_le.setAlignment(Qt.AlignLeft)\r\n self.zip_le.textChanged.connect(self.enable_zip_clear)\r\n\r\n zip_directory_butt = QPushButton()\r\n zip_directory_butt.setIcon(self.style().standardIcon(getattr(QStyle, 'SP_DirIcon')))\r\n zip_directory_butt.clicked.connect(self.zip_file_dialog)\r\n\r\n self.zip_clear_butt = QPushButton(\"Clear\")\r\n self.zip_clear_butt.clicked.connect(self.clear_zip)\r\n self.zip_clear_butt.setEnabled(False)\r\n\r\n self.import_butt = QPushButton(\"Start Importing Files\")\r\n self.import_butt.clicked.connect(self.start_import)\r\n self.import_butt.setEnabled(False)\r\n\r\n separator = QFrame()\r\n separator.setFrameShape(QFrame.HLine)\r\n separator.setFrameShadow(QFrame.Sunken)\r\n separator.setFixedHeight(10)\r\n separator.setLineWidth(3)\r\n\r\n status_title_label = QLabel(\"Status of Imported Files\")\r\n status_title_label.setFont(QtGui.QFont(\"Times\", weight = QtGui.QFont.Bold))\r\n status_title_label.setAlignment(Qt.AlignCenter)\r\n\r\n self.zip_status_label = QLabel(\"ZIP File Status: N\\A\")\r\n self.zip_status_label.setFont(QtGui.QFont(\"Times\", weight=QtGui.QFont.Bold))\r\n self.zip_status_label.setAlignment(Qt.AlignCenter)\r\n\r\n self.status_completion_label = QLabel(\"Completion Status: Unavailable\")\r\n self.status_completion_label.setFont(QtGui.QFont(\"Times\", weight = QtGui.QFont.Bold))\r\n self.status_completion_label.setAlignment(Qt.AlignCenter)\r\n\r\n self.progress_bar_counter = 0\r\n self.progress_bar = QProgressBar()\r\n self.progress_bar.setMaximum(100)\r\n\r\n layout1.addWidget(import_zip_label)\r\n layout1.addWidget(self.zip_le)\r\n layout1.addWidget(zip_directory_butt)\r\n layout1.addWidget(self.zip_clear_butt)\r\n\r\n layout2.addWidget(self.zip_status_label)\r\n\r\n mainlayout.addStretch()\r\n mainlayout.addLayout(layout1)\r\n mainlayout.addStretch()\r\n mainlayout.addWidget(self.import_butt)\r\n mainlayout.addWidget(separator)\r\n mainlayout.addStretch()\r\n 
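# Status section: title, ZIP status line, completion label and progress bar.\r\n 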
mainlayout.addWidget(status_title_label)\r\n mainlayout.addStretch()\r\n mainlayout.addLayout(layout2)\r\n mainlayout.addStretch()\r\n mainlayout.addWidget(self.status_completion_label)\r\n mainlayout.addWidget(self.progress_bar)\r\n mainlayout.addStretch()\r\n logging.debug(\"ImportManager(): Complete\")\r\n\r\n def enable_zip_clear(self):\r\n logging.debug(\"enable_zip_clear(): Instantiated\")\r\n self.zip_clear_butt.setEnabled(True)\r\n self.update_import_butt()\r\n logging.debug(\"enable_zip_clear(): Complete\")\r\n\r\n def clear_zip(self):\r\n logging.debug(\"clear_zip(): Instantiated\")\r\n self.zip_le.setText(\"\")\r\n self.zip_clear_butt.setEnabled(False)\r\n self.update_import_butt()\r\n logging.debug(\"clear_zip(): Complete\")\r\n\r\n def update_import_butt(self):\r\n logging.debug(\"update_import_butt(): Instantiated\")\r\n if self.zip_clear_butt.isEnabled():\r\n self.import_butt.setEnabled(True)\r\n else:\r\n self.import_butt.setEnabled(False)\r\n logging.debug(\"update_import_butt(): Complete\")\r\n\r\n def zip_file_dialog(self):\r\n logging.debug(\"zip_file_dialog(): Instantiated\")\r\n file_chosen = JFileDialog().json_dialog()\r\n self.zip_le.setText(file_chosen)\r\n logging.debug(\"zip_file_dialog(): Complete\")\r\n\r\n def start_import(self):\r\n logging.debug(\"start_import(): Instantiated\")\r\n self.import_butt.setEnabled(False)\r\n self.progress_bar_counter = 0\r\n self.progress_bar.setValue(self.progress_bar_counter)\r\n bool_file_list = self.initial_status_update()\r\n self.import_thread = ImportThread(self.zip_le.text(), bool_file_list)\r\n self.import_thread.signal1.connect(self.successful_zip_status)\r\n self.import_thread.signal2.connect(self.error_zip_status)\r\n self.import_thread.signal3.connect(self.update_progress_bar)\r\n self.import_thread.signal4.connect(self.finish_progress_bar)\r\n self.import_thread.start()\r\n logging.debug(\"start_import(): Complete\")\r\n\r\n def initial_status_update(self):\r\n logging.debug(\"initial_status_update(): Instantiated\")\r\n temp_list = []\r\n if self.zip_le.text() == '':\r\n self.zip_status_label.setText(\"ZIP File Status: No File Input\")\r\n temp_list.append(0)\r\n else:\r\n self.zip_status_label.setText(\"ZIP File Status: Starting\")\r\n temp_list.append(1)\r\n self.status_completion_label.setText(\"Completion Status: Starting\")\r\n logging.debug(\"initial_status_update(): Complete\")\r\n return temp_list\r\n\r\n def successful_zip_status(self):\r\n logging.debug(\"successful_zip_status(): Instantiated\")\r\n self.zip_status_label.setText(\"ZIP File Status: Successful Import\")\r\n logging.debug(\"successful_config_status(): Complete\")\r\n\r\n def error_zip_status(self):\r\n logging.debug(\"error_zip_status(): Instantiated\")\r\n self.zip_status_label.setText(\"ZIP File Status: Error Encountered on Import\")\r\n logging.debug(\"error_zip_status(): Complete\")\r\n\r\n def update_progress_bar(self):\r\n logging.debug(\"update_progress_bar(): Instantiated\")\r\n self.progress_bar_counter += 10\r\n if self.progress_bar_counter == 10:\r\n self.status_completion_label.setText(\"Completion Status: Working on ZIP File\")\r\n elif self.progress_bar_counter == 100:\r\n self.status_completion_label.setText(\"Completion Status: Finished ZIP File\")\r\n self.progress_bar.setValue(self.progress_bar_counter)\r\n logging.debug(\"update_progress_bar(): Complete\")\r\n\r\n def finish_progress_bar(self):\r\n logging.debug(\"finish_progress_bar(): Instantiated\")\r\n self.progress_bar_counter = 100\r\n 
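# Force the bar to 100% in case fewer progress signals arrived than expected.\r\n 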
self.status_completion_label.setText(\"Completion Status: Finished Importing Files\")\r\n self.import_butt.setEnabled(True)\r\n self.progress_bar.setValue(self.progress_bar_counter)\r\n logging.debug(\"finish_progress_bar(): Complete\")","repo_name":"seb1gonzalez/abs","sub_path":"components/packager/Packager2.0/ImportSubcomponent.py","file_name":"ImportSubcomponent.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24742792733","text":"class Solution(object):\n def coinChange(self, coins, amount):\n \"\"\"\n :type coins: List[int]\n :type amount: int\n :rtype: int\n \"\"\"\n coins.sort()\n\n if amount == 0:\n return 0\n\n if coins[0] > amount:\n return -1\n\n while coins and coins[-1] > amount:\n coins.pop()\n\n cache = [None]*(amount+1)\n cache[0] = 0\n for i in range(1, coins[0]):\n cache[i] = -1\n\n for i in range(coins[0], amount+1):\n min = -1\n for coin in coins:\n sub = i - coin\n if sub<0:\n break\n if cache[sub]>=0:\n temp = cache[sub] + 1\n min = temp if (temp date_from_db]\n\n if(len(df)==0):\n return {\n 'statusCode': 200,\n 'body': json.dumps('Pipeline has already downloaded up to date data')\n }\n\n json_string = df.to_json()\n\n s3object = s3.Object(BUCKET_NAME,f\"inbox/{date_from_db.replace('-','_')}_{to_date.replace('-','_')}_dfrac.json\")\n\n s3object.put(\n Body=bytes(json_string.encode('UTF-8'))\n )\n\n save_from_date_to_date({\"from_date\": date_from_db, \"to_date\": to_date})\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Pipeline has been executed successfully')\n }","repo_name":"vaasu2002/Fake-News-Detecting-Training-Pipeline","sub_path":"lambda/function1/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1397361497","text":"import struct\nimport rclpy\nfrom rclpy.node import Node\nfrom rosbags.rosbag2 import Reader\nfrom rosbags.serde import deserialize_cdr\nfrom sensor_msgs.msg import Image, PointCloud2, PointField\nimport std_msgs.msg\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nimport cv2\nimport numpy as np\nimport threading\nimport sys\nimport os\nimport time\nimport subprocess\nimport signal\n\n\n#/home/nm/Downloads/rosbag2_2022_10_14-11_14_30 # T265\n#/home/nm/Downloads/PL_MAPPER_bags/bag_uncompressed # both img and pcl\n\nglobal parsing_radar_done\nparsing_radar_done = 0\nglobal parsing_img_done\nparsing_img_done = 0\n\nclass DataLoader(Node):\n def __init__(self, bag_path, img_topic, mmw_topic):\n super().__init__('data_loader')\n \n\n self.bag_path = bag_path#'/home/nm/Downloads/PL_MAPPER_bags/sim_topics'\n\n self.img_topic = img_topic\n self.mmw_topic = mmw_topic\n\n\n\n self.img_pub_topic = img_topic\n self.mmw_pub_topic = mmw_topic\n \n\n self.img_publisher_ = self.create_publisher(Image, self.img_pub_topic, 10)\n self.mmw_publisher_ = self.create_publisher(PointCloud2, self.mmw_pub_topic, 10)\n\n self.bridge = CvBridge()\n\n self.img_msgs = []\n self.mmw_msgs = []\n\n\n def pcl_to_numpy(self, pcl_msg):\n\n points = []\n\n n_points = int(len(pcl_msg.data)/3/4)\n\n for i in range(n_points):\n point = [0,0,0]\n\n for j in range(3):\n point[j] = struct.unpack(\"f\", pcl_msg.data[i*12+j*4:i*12+(j+1)*4])[0]\n\n points.append(point)\n \n arr = np.asarray(points)\n\n return arr\n\n\n def load_data(self):\n # create reader instance and open for reading\n with 
Reader(self.bag_path) as reader:\n\n # iterate over messages\n for connection, timestamp, rawdata in reader.messages():\n\n if connection.topic == self.img_topic:\n msg = deserialize_cdr(rawdata, connection.msgtype)\n self.img_msgs.append(msg)\n\n elif connection.topic == self.mmw_topic:\n msg = deserialize_cdr(rawdata, connection.msgtype)\n self.mmw_msgs.append(msg)\n\n print(\"Finished loading messages\")\n\n\n def republish_img(self): \n print(\"Republishing images...\")\n\n now_start_time = 0\n now_start_sec = 0\n now_start_nanosec = 0\n orig_start_sec = 0\n orig_start_nanosec = 0\n now_nano_delta = 0\n now_sec_delta = 0\n orig_nano_delta = 0\n orig_sec_delta = 0\n now_delta = 0\n orig_delta = 0\n it = 0\n\n # iterate over messages\n for i in range(len(self.img_msgs)):\n\n if it == 0:\n now_start_time = self.get_clock().now().to_msg()\n now_start_sec = now_start_time.sec\n now_start_nanosec = now_start_time.nanosec\n orig_start_sec = self.img_msgs[0].header.stamp.sec\n orig_start_nanosec = self.img_msgs[0].header.stamp.nanosec\n else:\n orig_sec_delta = self.img_msgs[i].header.stamp.sec - orig_start_sec\n orig_nano_delta = self.img_msgs[i].header.stamp.nanosec - orig_start_nanosec\n orig_delta = orig_sec_delta*1000 + orig_nano_delta/1000000\n\n # wait the correct amount of time before publishing\n while(not(now_delta > orig_delta)):\n now_time = self.get_clock().now().to_msg()\n now_sec_delta = now_time.sec - now_start_sec\n now_nano_delta = now_time.nanosec - now_start_nanosec\n now_delta = now_sec_delta*1000 + now_nano_delta/1000000 \n\n it = it + 1\n\n # convert original image to numpy array\n cv_image = self.bridge.imgmsg_to_cv2(self.img_msgs[i], desired_encoding='passthrough')\n\n # create new message with numpy array as data\n img_msg = Image()\n img_msg = self.bridge.cv2_to_imgmsg(cv_image, encoding='passthrough')\n img_msg.header = std_msgs.msg.Header()\n img_msg.header.stamp = self.get_clock().now().to_msg() #msg.header.stamp\n img_msg.header.stamp.sec = self.img_msgs[i].header.stamp.sec\n img_msg.header.stamp.nanosec = self.img_msgs[i].header.stamp.nanosec\n img_msg.header.frame_id = self.img_msgs[i].header.frame_id\n img_msg.height = self.img_msgs[i].height\n img_msg.width = self.img_msgs[i].width\n img_msg.encoding = self.img_msgs[i].encoding\n img_msg.is_bigendian = self.img_msgs[i].is_bigendian\n img_msg.step = self.img_msgs[i].step\n self.img_publisher_.publish(img_msg)\n\n global parsing_img_done\n parsing_img_done = 1\n print(\"Images finished\")\n\n\n def republish_mmw(self):\n print(\"Republishing radar data...\")\n\n now_start_time = 0\n now_start_sec = 0\n now_start_nanosec = 0\n orig_start_sec = 0\n orig_start_nanosec = 0\n now_nano_delta = 0\n now_sec_delta = 0\n orig_nano_delta = 0\n orig_sec_delta = 0\n now_delta = 0\n orig_delta = 0\n it = 0\n\n # iterate over messages\n for i in range(len(self.mmw_msgs)):\n\n if it == 0:\n now_start_time = self.get_clock().now().to_msg()\n now_start_sec = now_start_time.sec\n now_start_nanosec = now_start_time.nanosec\n orig_start_sec = self.mmw_msgs[0].header.stamp.sec\n orig_start_nanosec = self.mmw_msgs[0].header.stamp.nanosec\n else:\n orig_sec_delta = self.mmw_msgs[i].header.stamp.sec - orig_start_sec\n orig_nano_delta = self.mmw_msgs[i].header.stamp.nanosec - orig_start_nanosec\n orig_delta = orig_sec_delta*1000 + orig_nano_delta/1000000\n\n # wait the correct amount of time before publishing\n while(not(now_delta > orig_delta)):\n\n now_time = self.get_clock().now().to_msg()\n now_sec_delta = now_time.sec - 
now_start_sec\n now_nano_delta = now_time.nanosec - now_start_nanosec\n now_delta = now_sec_delta*1000 + now_nano_delta/1000000 \n\n it = it + 1\n\n # create new message with numpy array as data\n pcl_msg = PointCloud2()\n pcl_msg.header = std_msgs.msg.Header()\n pcl_msg.header.stamp = self.get_clock().now().to_msg() #msg.header.stamp\n pcl_msg.header.stamp.sec = self.mmw_msgs[i].header.stamp.sec\n pcl_msg.header.stamp.nanosec = self.mmw_msgs[i].header.stamp.nanosec\n pcl_msg.header.frame_id = self.mmw_msgs[i].header.frame_id\n pcl_msg.height = self.mmw_msgs[i].height\n pcl_msg.width = self.mmw_msgs[i].width\n pcl_msg.fields = [PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1),\n PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1),\n PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1)]\n pcl_msg.point_step = self.mmw_msgs[i].point_step #size of 1 point (float32 * dimensions (3 when xyz))\n pcl_msg.row_step = pcl_msg.point_step*pcl_msg.width # only 1 row because unordered\n pcl_msg.is_dense = True\n pcl_msg.data = self.mmw_msgs[i].data.tobytes() # tobytes(); numpy's tostring() is deprecated\n\n self.mmw_publisher_.publish(pcl_msg)\n\n global parsing_radar_done\n parsing_radar_done = 1\n print(\"Radar data finished\")\n\n\n\ndef main(argv=None):\n\n if len(sys.argv) == 5:\n bag_path = sys.argv[1] \n img_topic = sys.argv[2]\n mmw_topic = sys.argv[3]\n bag_dir = sys.argv[4]\n elif len(sys.argv) > 5:\n print(\"Too many arguments given. Usage: \\n ros2 run rosbag2_upgrader upgrade <bag_path> <img_topic> <mmw_topic> <bag_dir>\")\n exit()\n elif len(sys.argv) < 5:\n print(\"Too few arguments given. Usage: \\n ros2 run rosbag2_upgrader upgrade <bag_path> <img_topic> <mmw_topic> <bag_dir>\")\n exit()\n\n\n #init\n rclpy.init()\n minimal_publisher = DataLoader(bag_path=bag_path, img_topic=img_topic, mmw_topic=mmw_topic)\n minimal_publisher.load_data()\n\n recorder = subprocess.Popen(['ros2', 'bag', 'record', '-o', bag_dir, img_topic, mmw_topic])\n\n time.sleep(0.5)\n\n mmw_thread = threading.Thread(target=minimal_publisher.republish_mmw)\n cam_thread = threading.Thread(target=minimal_publisher.republish_img)\n\n mmw_thread.start()\n cam_thread.start()\n\n\n global parsing_img_done\n global parsing_radar_done\n while(parsing_img_done != 1 or parsing_radar_done != 1):\n time.sleep(0.5)\n\n #shutdown\n mmw_thread.join()\n cam_thread.join()\n recorder_pid = recorder.pid\n os.kill(recorder_pid, signal.SIGINT) # ctrl+c to ros2 bag record\n\n print(\"Done\")\n minimal_publisher.destroy_node()\n rclpy.shutdown()\n exit()\n \n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nhma20/rosbag2_upgrader","sub_path":"rosbag2_upgrader/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":9075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20155175267","text":"from __future__ import print_function\nimport topology\nfrom time import sleep\n\n\ntorus = topology.Torus(5,6)\ntorus.initialise()\ntopology.injectRandomLinkFaults(torus, 10)\ntorus.printTopologyMap(False)\n\ntorus.initialise()\n# torus.routers[0][3].setLinkHealthList([1,0,0,0])\n# torus.routers[3][0].setLinkHealthList([0,0,0,0])\ntopology.injectRandomRouterFaults(torus, 4, True, 1)\n\nmesh = topology.Mesh(26,28)\nmesh.initialise()\ntopology.injectRandomRouterFaults(mesh, 26*27, True, 0.05)","repo_name":"arbaranwal/noc","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"20600408007","text":"from django import 
forms\nfrom .models import Listings, ListingMedia\n\n\n# This is the form to create a new listing\nclass ListingCreateForm(forms.ModelForm):\n class Meta:\n model = Listings\n widgets = {\n \"featured_image\": forms.FileInput(\n attrs={\n \"enctype\": \"multipart/form-data\"\n }\n ),\n # Hidden inputs are used by create.js to relay values\n \"boat_feature_list\": forms.HiddenInput(attrs={'required': 'false'}), # noqa\n \"type\": forms.HiddenInput(),\n \"condition\": forms.HiddenInput(),\n \"tax_paid\": forms.HiddenInput(),\n \"category\": forms.HiddenInput(),\n }\n fields = \"__all__\"\n exclude = (\"created_by\", \"created_on\", \"last_modified\",\n \"listing_status\", )\n\n\n# This form edits listings & is the same as create except for no featured_image\nclass ListingEditForm(forms.ModelForm):\n class Meta:\n model = Listings\n widgets = {\n \"boat_feature_list\": forms.HiddenInput(attrs={'required': 'false'}), # noqa\n \"type\": forms.HiddenInput(),\n \"condition\": forms.HiddenInput(),\n \"tax_paid\": forms.HiddenInput(),\n \"category\": forms.HiddenInput(),\n }\n fields = \"__all__\"\n exclude = (\"created_by\", \"created_on\", \"last_modified\",\n \"featured_image\",)\n\n\n# This form is for the gallery images, note the multiple attr is true\nclass ListingMediaForm(forms.ModelForm):\n class Meta:\n widgets = {\n \"image\": forms.ClearableFileInput(\n attrs={\n \"multiple\": True\n }\n ),\n }\n model = ListingMedia\n fields = ['image']\n","repo_name":"ancfoster/Next-Boat","sub_path":"listings/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"9765956874","text":"#!/usr/bin/python3\r\nimport glob, os, hashlib\r\n\r\nos.getcwd()\r\n\r\nhashValue = [] \r\n#hashValue \r\nfor file in os.listdir(\"files\"):\r\n if file.endswith(\".txt\"):\r\n with open(os.path.join(\"files\", file), 'rb') as f:\r\n print(file)\r\n file_contents = f.read()\r\n hash=hashlib.sha1(file_contents).hexdigest()\r\n hashValue.append(hash)\r\n\r\nprint(\"Hash value of all the contents:\")\r\nprint(hashValue)\r\nprint(\"\\n\")\r\nif(len(hashValue)%2!=0):\r\n hashValue.append(hashValue[-1])\r\nwhile(len(hashValue)>1):\r\n # if the level has an odd number of hashes, duplicate the last one\r\n if(len(hashValue)%2!=0):\r\n hashValue.append(hashValue[-1])\r\n j=0\r\n # pair adjacent hashes, stepping by 2 so each hash is combined exactly once\r\n for i in range(0, len(hashValue)-1, 2):\r\n f = str(hashValue[i]+hashValue[i+1])\r\n hashValue[j]=hashlib.sha1(f.encode()).hexdigest()\r\n j+=1\r\n del hashValue[j:]\r\n\r\nif (len(hashValue)==1):\r\n print(\"Top Hash is:\") \r\n print(hashValue)\r\n with open(\"check.aap\", \"a\") as a:\r\n a.write(hashValue[0])\r\n print(\"The Top Hash has been saved in check.aap\") \r\n \r\n \r\n","repo_name":"charityCyberSecurity/Top-Hash","sub_path":"top_hash.py","file_name":"top_hash.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26378764485","text":"import boto3\nimport botocore\nfrom logging import getLogger\nimport json\nimport requests\nimport os\n\n# x-ray patch\nfrom aws_xray_sdk.core import patch\n\npatch([\"boto3\"])\n\nlog = getLogger(__name__)\nssm = boto3.client(\"ssm\")\nresponse = ssm.get_parameter(\n Name=\"webhookURL\",\n WithDecryption=True\n)\nteams_url = response[\"Parameter\"][\"Value\"]\ncfnclient = boto3.client(\"cloudformation\")\nsnsclient = boto3.client(\"sns\")\naws_region = boto3.session.Session().region_name\naccount_id = boto3.client(\"sts\").get_caller_identity().get(\"Account\")\ntopic_arn = (\n \"arn:aws:sns:\"\n + aws_region\n + 
\":\"\n + account_id\n + \":\"\n + os.environ[\"ENV_NAME\"]\n + \"-CFn-Update-Notification\"\n)\n\n\ndef retry_treatment(event, context):\n change_service = event[\"requestPayload\"][\"Service\"]\n log.info(f\"Checking {change_service} Service\")\n\n waiter = cfnclient.get_waiter(\"stack_update_complete\")\n # 30 sec * 26 times\n waiter.config.maxAttempts = 26\n\n try:\n waiter.wait(Stackname=f\"prod-{change_service}-stack\")\n except botocore.exceptions.WaiterError as e:\n log.info(e)\n Msg = f\"Not Accomplished Updating {change_service} stack Service\"\n Success = False\n else:\n requests.post(\n teams_url,\n json.dumps(\n {\n \"title\": \"Update Stacks Complete\",\n \"text\": f\"{change_service} is changed.\",\n }\n ),\n )\n Msg = f\"Update {change_service} stack complete\"\n Success = True\n finally:\n return {\"Success\": Success, \"body\": Msg, \"Service\": change_service}\n\n\ndef lambda_handler(event, context):\n response = retry_treatment(event, context)\n if response[\"Success\"]:\n snsclient.publish(\n TopicArn=topic_arn,\n Message=response[\"body\"],\n Subject=\"CFn Update Notification\",\n )\n else:\n snsclient.publish(\n TopicArn=topic_arn,\n Message=response[\"body\"],\n Subject=\"CFn Update Notification\",\n )\n","repo_name":"shohta-tera/AWS_parameterstore_update","sub_path":"src/CFnUpdate/checkupdatestatus.py","file_name":"checkupdatestatus.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"1798348795","text":"from django.urls import path\r\nfrom .views import *\r\n\r\nurlpatterns = [\r\n path('apply/', Apply.as_view()),\r\n path('personalinformation/', PersonalInformation.as_view()),\r\n path('universityselection/', UniversityInformation.as_view()),\r\n path('educationinformation/', EducationInformation.as_view()),\r\n path('uploaddocuments/', UploadDocumnts.as_view()),\r\n]","repo_name":"Inamul2/test","sub_path":"rapid/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"74898303417","text":"from geoprocessor.core.CommandLogRecord import CommandLogRecord\nfrom geoprocessor.core.CommandStatusType import CommandStatusType\nfrom geoprocessor.core.CommandPhaseType import CommandPhaseType\n\n\nclass CommandStatus(object):\n \"\"\"\n Class to hold the command status, including a log of messages generated when initializing,\n discovering, and running the command.\n \"\"\"\n def __init__(self) -> None:\n \"\"\"\n Initialize the instance.\n \"\"\"\n # The default status of the command is UNKNOWN.\n self.initialization_status = CommandStatusType.UNKNOWN\n self.discovery_status = CommandStatusType.UNKNOWN\n self.run_status = CommandStatusType.UNKNOWN\n\n self.initialization_log_list = []\n self.discovery_log_list = []\n self.run_log_list = []\n\n def add_to_log(self, command_phase: CommandPhaseType, log_record: CommandLogRecord) -> None:\n \"\"\"\n Add a CommandLogRecord instance to the command status, for the specific command phase.\n The overall status for the command phase is also set to the maximum severity,\n which is the previous maximum severity and that of the new log record.\n\n Args:\n command_phase: The command phase for the log record (e.g, CommandPhaseType.RUN)\n log_record: A CommandLogRecord instance.\n\n Returns:\n None.\n \"\"\"\n if command_phase is CommandPhaseType.INITIALIZATION:\n self.initialization_status = 
CommandStatusType.max_severity(\n self.initialization_status, log_record.severity)\n self.initialization_log_list.append(log_record)\n elif command_phase is CommandPhaseType.DISCOVERY:\n self.discovery_status = CommandStatusType.max_severity(self.discovery_status, log_record.severity)\n self.discovery_log_list.append(log_record)\n elif command_phase is CommandPhaseType.RUN:\n self.run_status = CommandStatusType.max_severity(self.run_status, log_record.severity)\n self.run_log_list.append(log_record)\n\n def clear_log(self, command_phase: CommandPhaseType = None) -> None:\n \"\"\"\n Clear the CommandLogRecord list for the command.\n The run status for all phases is set to CommandStatusType.UNKNOWN.\n\n Args:\n command_phase: Phase of running a command (see CommandPhaseType) or None to clear logs for all phases.\n\n Returns:\n None.\n \"\"\"\n if command_phase is CommandPhaseType.INITIALIZATION or command_phase is None:\n del self.initialization_log_list[:]\n self.initialization_status = CommandStatusType.UNKNOWN\n elif command_phase is CommandPhaseType.DISCOVERY or command_phase is None:\n del self.discovery_log_list[:]\n self.discovery_status = CommandStatusType.UNKNOWN\n elif command_phase is CommandPhaseType.RUN or command_phase is None:\n del self.run_log_list[:]\n self.run_status = CommandStatusType.UNKNOWN\n\n def get_command_status_for_phase(self, command_phase: CommandPhaseType) -> CommandStatusType:\n \"\"\"\n Return the command status for a phase of command processing.\n\n Args:\n command_phase: Command phase for which to get the status.\n\n Returns:\n Command status for the specified command phase.\n \"\"\"\n if command_phase is CommandPhaseType.INITIALIZATION:\n return self.initialization_status\n elif command_phase is CommandPhaseType.DISCOVERY:\n return self.discovery_status\n elif command_phase is CommandPhaseType.RUN:\n return self.run_status\n else: # This should never happen.\n return CommandStatusType.UNKNOWN\n\n def get_log_count(self, phase: CommandPhaseType = None, severity: CommandStatusType = None) -> int:\n \"\"\"\n\n Args:\n phase: the command phase type (e.g., CommandPhaseType.RUN), or None to include all phases.\n severity: the severity (e.g., CommandStatusType.WARNING) of log messages to count\n\n Returns: the count of the log messages for the given phase and severity\n \"\"\"\n log_count = 0\n if phase is CommandPhaseType.INITIALIZATION or phase is None:\n for log_message in self.initialization_log_list:\n if log_message.severity is severity:\n log_count = log_count + 1\n if phase is CommandPhaseType.DISCOVERY or phase is None:\n for log_message in self.discovery_log_list:\n if log_message.severity is severity:\n log_count = log_count + 1\n if phase is CommandPhaseType.RUN or phase is None:\n for log_message in self.run_log_list:\n if log_message.severity is severity:\n log_count = log_count + 1\n return log_count\n\n def refresh_phase_severity(self, phase: CommandPhaseType, severity_if_unknown: CommandStatusType) -> None:\n \"\"\"\n Refresh the command status for a phase.\n This ensures that the command has a status even if no log messages were generated.\n This should normally only be called when initializing a status or setting to success.\n Otherwise, add_to_log() should be used and the status determined from the CommandLogRecord status values.\n\n Args:\n phase: Command phase, such as CommandPhaseType.RUN.\n severity_if_unknown: The severity to set for the phase if it is currently UNKNOWN.\n For example, specify as CommandStatusType.SUCCESS to override 
the\n initial CommandStatusType.UNKNOWN value.\n\n Returns:\n None.\n \"\"\"\n if phase is CommandPhaseType.INITIALIZATION:\n if self.initialization_status is CommandStatusType.UNKNOWN:\n self.initialization_status = severity_if_unknown\n elif phase is CommandPhaseType.DISCOVERY:\n if self.discovery_status is CommandStatusType.UNKNOWN:\n self.discovery_status = severity_if_unknown\n elif phase is CommandPhaseType.RUN:\n if self.run_status is CommandStatusType.UNKNOWN:\n self.run_status = severity_if_unknown\n","repo_name":"OpenWaterFoundation/owf-app-geoprocessor-python","sub_path":"src/geoprocessor/core/CommandStatus.py","file_name":"CommandStatus.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"31611059184","text":"def type_3(gr):\n p = []\n t = 0\n for i in gr:\n a = i[i.find('>')+1 :]\n if a.find('|') == -1:\n if a.islower():\n p.append(1)\n elif a == 'E':\n p.append(0)\n elif a[0].islower() and a[1].isupper(): # right-linear\n p.append(2)\n elif a[0].isupper() and a[1].islower(): # left-linear\n p.append(3)\n else:\n p.append(0)\n else:\n d = a.split('|')\n for j in d:\n if j.islower():\n p.append(1)\n elif j == 'E':\n p.append(0)\n elif j[0].islower() and j[1].isupper(): # right-linear\n p.append(2)\n elif j[0].isupper() and j[1].islower(): # left-linear\n p.append(3)\n else:\n p.append(0)\n print('p ',p)\n if (1 in set(p)) and (2 in set(p)) and (0 not in set(p)):\n print('type 3 grammar, right-linear')\n elif (1 in set(p)) and (3 in set(p)) and (0 not in set(p)):\n print('type 3 grammar, left-linear')\n else:\n return t \n \n\ndef type_1(gr):\n p = []\n t = 0\n for i in gr:\n a = i[i.find('>')+1 :]\n if a.find('|') == -1:\n if len(i[:i.find('-')]) <= len(a):\n p.append(1)\n else:\n p.append(0)\n else:\n d = a.split('|')\n for j in d:\n if len(i[:i.find('-')]) <= len(j):\n p.append(1)\n else:\n p.append(0)\n if set(p) == {1}:\n print('type 1 grammar')\n else:\n return t\n'----------------------------------------------------------------------------------------------------------------------------------------'\n'''\n# grammar input\n# S->aaS|B|E\n# B->b\n'''\ngr = ['S->Ca|Ba','C->c','B->E']\n\n# type check\nk = 1\nfor i in gr:\n if i[:i.find('-')].isupper(): # the left side up to '->' is checked for being a nonterminal\n #print('type 2/3 or 0/1')\n pass\n else:\n #print('type 0 or 1')\n k = 0\n\nif k == 0:\n t = type_1(gr)\n if t == 0:\n print('type 0 grammar')\nelse:\n t = type_3(gr)\n if t == 0:\n print('type 2 grammar')\n\n \n\n\n\n","repo_name":"Unti1/DSTU-study","sub_path":"3_semestr/TeoryAuto/laba_7_type_of_grammer.py","file_name":"laba_7_type_of_grammer.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"27584952412","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n.. module:: face_encoding\n :platform: Unix\n :synopsis: the top-level submodule of T_System that contains the classes related to T_System's encoding human face ability for recognizing them.\n\n.. 
moduleauthor:: Cem Baybars GÜÇLÜ \n\"\"\"\n\nimport face_recognition\nimport pickle\nimport cv2\nimport os # Miscellaneous operating system interfaces\nimport uuid # The random id generator\nimport binascii\n\nfrom imutils import paths\nfrom shutil import copy, rmtree\nfrom tinydb import Query # TinyDB is a lightweight document oriented database\nfrom base64 import b64decode\nfrom os import listdir\nfrom os.path import isfile, join\nfrom multipledispatch import dispatch\n\nfrom t_system.db_fetching import DBFetcher\n\nfrom t_system import dot_t_system_dir\nfrom t_system import log_manager\n\nlogger = log_manager.get_logger(__name__, \"DEBUG\")\n\n\nclass FaceEncodeManager:\n \"\"\"Class to define a face encode manager of tracking system..\n\n This class provides necessary initiations and a function named\n :func:`t_system.face_encoding.FaceEncodeManager.add_face` for the generating faces to creating encoding pickle file them.\n \"\"\"\n\n def __init__(self, detection_method=\"hog\"):\n \"\"\"Initialization method of :class:`t_system.face_encoding.FaceEncodeManager` class.\n\n Args:\n detection_method (str): face detection model\n \"\"\"\n self.detection_method = detection_method # either `hog` or `cnn`\n\n self.recognition_folder = f'{dot_t_system_dir}/recognition'\n self.encodings_folder = f'{self.recognition_folder}/encodings'\n self.dataset_folder = f'{self.recognition_folder}/dataset'\n self.main_encoding_file = f'{self.recognition_folder}/main_encoding.pickle'\n\n self.__check_folders()\n\n self.table = DBFetcher(self.recognition_folder, \"db\", \"faces\").fetch()\n\n self.face_encoder = FaceEncoder(detection_method)\n\n self.faces = []\n self.__refresh_faces()\n\n @dispatch(str, str)\n def add_face(self, name, dataset_folder):\n \"\"\"Method to create new face using given external dataset.\n\n Args:\n name (str): The name of the man who has face in dataset.\n dataset_folder (str): The path of the dataset that will be encoded.\n \"\"\"\n\n face = self.__get_face_of(name)\n\n if not face:\n face = Face(name)\n\n src_files = os.listdir(dataset_folder)\n for file_name in src_files:\n full_file_name = os.path.join(dataset_folder, file_name)\n if os.path.isfile(full_file_name):\n copy(full_file_name, face.dataset_folder)\n\n face.refresh_image_names()\n\n self.face_encoder.encode(face.dataset_folder, face.pickle_file, face.name)\n self.faces.append(face)\n\n @dispatch(str, list)\n def add_face(self, name, photos):\n \"\"\"Method to create new face using base64 encoded photos.\n\n Args:\n name (str): The name of the man who has face in dataset.\n photos (list): The person's raw photo data list. Contains list of {\"name\": \"photo_name\", \"base_sf\": \"Base64_encoded_data\"}.\n \"\"\"\n face = self.__get_face_of(name)\n\n if not face:\n face = Face(name)\n\n face.create_dataset_from_base_sf_photos(photos)\n\n self.face_encoder.encode(face.dataset_folder, face.pickle_file, face.name)\n self.faces.append(face)\n\n @dispatch(str, set)\n def add_face(self, name, photos):\n \"\"\"Method to creating image that will be dataset for recognizing person's face later from FileStorage object that is coming from HTML input form element via Flask.\n\n Args:\n name (str): The name of the man who has face in dataset.\n photos (set): The FileStorage object set. 
that is been converted to list for becoming its indexing.\n \"\"\"\n face = self.__get_face_of(name)\n\n if not face:\n face = Face(name)\n\n face.create_dataset_from_file_storage_object(list(photos))\n\n self.face_encoder.encode(face.dataset_folder, face.pickle_file, face.name)\n self.faces.append(face)\n\n def update_face(self, face_id, photos):\n \"\"\"Method to update face.\n\n Args:\n face_id (str): The id of the face.\n photos (list): The person's raw photo data list. Contains list of {\"name\": \"photo_name\", \"base_sf\": \"Base64_encoded_data\"}.\n \"\"\"\n\n for face in self.faces:\n if face.id == face_id:\n # ELIMINATION OF EXISTING PHOTOS WILL BE HERE\n face.create_dataset_from_base_sf_photos(photos)\n self.face_encoder.encode(face.dataset_folder, face.pickle_file, face.name)\n return True\n return False\n\n @dispatch()\n def get_faces(self):\n \"\"\"Method to return all existing faces.\n \"\"\"\n\n return self.faces\n\n @dispatch(list)\n def get_faces(self, ids):\n \"\"\"Method to return all existing faces.\n\n Args:\n ids (list): The id list of faces.\n \"\"\"\n\n faces = []\n\n for face in self.faces:\n if face.id in ids:\n faces.append(face)\n\n return faces\n\n def get_face(self, face_id):\n \"\"\"Method to return face via given face id.\n\n Args:\n face_id (str): The id of the face.\n \"\"\"\n\n for face in self.faces:\n if face.id == face_id:\n return face\n\n def delete_face(self, face_id):\n \"\"\"Method to delete face via given face id.\n\n Args:\n face_id (str): The id of the face.\n \"\"\"\n\n for face in self.faces:\n if face.id == face_id:\n face.remove_self()\n self.faces.remove(face) # for removing object from list\n return True\n return False\n\n def __refresh_faces(self):\n \"\"\"Method to get existing images from the database.\n \"\"\"\n self.faces.clear()\n\n faces = self.table.all()\n\n for face in faces:\n # face is {\"id\": face_id, \"name\": face_name, \"image_names\": []}\n self.faces.append(Face(face[\"name\"], face[\"id\"]))\n\n def __check_folders(self):\n \"\"\"Method to checking the necessary folders created before. 
If any folder is missing, it is created.\n \"\"\"\n\n if not os.path.exists(self.recognition_folder):\n os.mkdir(self.recognition_folder)\n\n if not os.path.exists(self.encodings_folder):\n os.mkdir(self.encodings_folder)\n\n if not os.path.exists(self.dataset_folder):\n os.mkdir(self.dataset_folder)\n\n def __get_face_of(self, name):\n \"\"\"Method to check whether a face with the given name was recorded before.\n\n Args:\n name (str): The name of the man who has face in dataset.\n \"\"\"\n\n for face in self.faces:\n if face.name == name:\n return face\n\n return None\n\n\nclass FaceEncoder:\n \"\"\"Class to define a face encoder of tracking system.\n\n This class provides necessary initiations and a function named\n :func:`t_system.face_encoder.FaceEncoder.encode` for generating encoding pickle files from a given image dataset.\n \"\"\"\n\n def __init__(self, detection_method=\"hog\"):\n \"\"\"Initialization method of :class:`t_system.face_encoder.FaceEncoder` class.\n\n Args:\n detection_method (str): face detection model\n \"\"\"\n\n self.detection_method = detection_method # either `hog` or `cnn`\n self.is_creating_pickle_completed = False\n\n def encode(self, dataset_folder, pickle_file, face_name=None):\n \"\"\"Method to generate encoding pickle files from given dataset.\n\n Args:\n dataset_folder (str): The path of the dataset that will be encoded.\n pickle_file (str): The file that keeps the faces' encoded data.\n face_name (str): The name of the man who has face in dataset.\n \"\"\"\n\n self.is_creating_pickle_completed = False\n logger.info(\"quantifying faces...\") # grab the paths to the input images in our dataset\n image_paths = list(paths.list_images(dataset_folder))\n\n known_encodings = []\n known_names = []\n name = face_name\n previous_name = \"\"\n\n for (i, image_path) in enumerate(image_paths):\n\n # extract the person name from the image path\n logger.info(f'processing image {i + 1}/{len(image_paths)}')\n if face_name is None:\n name = image_path.split(os.path.sep)[-2]\n\n if name != previous_name and previous_name:\n # a new person starts; write out the previous person's encodings first\n self.__write_to_pickle(pickle_file, known_encodings, known_names)\n\n known_encodings.clear()\n known_names.clear()\n image = cv2.imread(image_path)\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n boxes = face_recognition.face_locations(rgb, model=self.detection_method)\n\n encodings = face_recognition.face_encodings(rgb, boxes)\n\n for encoding in encodings:\n\n known_encodings.append(encoding)\n known_names.append(name)\n\n previous_name = name\n\n # flush the encodings gathered for the last (or only) name\n self.__write_to_pickle(pickle_file, known_encodings, known_names)\n\n self.is_creating_pickle_completed = True\n\n def __write_to_pickle(self, pickle_file, known_encodings, known_names):\n \"\"\"Method to write encodings and names to the given pickle file.\n\n Args:\n pickle_file (str): The file that keeps the faces' encoded data\n known_encodings (list): Existing encodings inside the pickle file\n known_names (list): Existing names inside the pickle file\n \"\"\"\n\n existing_encodings, existing_names = self.__get_existing_encoding_data(pickle_file)\n known_encodings.extend(existing_encodings)\n known_names.extend(existing_names)\n\n data = {\"encodings\": known_encodings, \"names\": known_names}\n\n f = open(pickle_file, \"wb\")\n f.write(pickle.dumps(data))\n f.close()\n\n @staticmethod\n def __generate_main_pickle_file(encodings_folder, main_pickle_file):\n \"\"\"Method to generate main encoding file from existing face encodings 
via merging separated encoding pickle files to one.\n\n Args:\n encodings_folder (str): The folder that keeps all faces' encoded data files.\n main_pickle_file (str): The file that keeps all faces' merged encoded data.\n \"\"\"\n\n main_decoding = {\"encodings\": [], \"names\": []}\n\n encoding_files = list(paths.list_files(encodings_folder))\n\n for encoding_file in encoding_files:\n\n encoding = open(encoding_file, \"rb\")\n decoding = pickle.load(encoding)\n\n main_decoding[\"encodings\"].extend(decoding[\"encodings\"])\n main_decoding[\"names\"].extend(decoding[\"names\"])\n\n encoding.close()\n\n main_encoding = open(main_pickle_file, \"wb\")\n main_encoding.write(pickle.dumps(main_decoding))\n main_encoding.close()\n\n @staticmethod\n def __get_existing_encoding_data(pickle_file):\n \"\"\"Method to get existing encoding data from the given pickle file.\n\n Args:\n pickle_file (str): The file that keeps the faces' encoded data\n \"\"\"\n\n if os.path.exists(pickle_file):\n data = pickle.loads(open(pickle_file, \"rb\").read())\n return data[\"encodings\"], data[\"names\"]\n\n return [], []\n\n def get_completion_status(self):\n \"\"\"Method to get completion status of face encoding.\n \"\"\"\n\n return self.is_creating_pickle_completed\n\n\nclass Face:\n \"\"\"Class to define a face that has dataset and encoding pickle file.\n\n This class provides necessary initiations and a function named :func:`t_system.face_encoding.Face.create_dataset_from_base_sf_photos`\n for creating dataset images from a given photo list that contains photo names and their base64 encoded forms.\n\n \"\"\"\n\n def __init__(self, name, id=None):\n \"\"\"Initialization method of :class:`t_system.face_encoder.Face` class.\n\n Args:\n name (str): The name of the man who has face in dataset.\n id (str): The id of the face.\n \"\"\"\n\n self.name = name\n\n self.id = id\n if not id:\n self.id = str(uuid.uuid1())\n\n self.recognition_folder = f'{dot_t_system_dir}/recognition'\n self.dataset_folder = f'{self.recognition_folder}/dataset/{self.name}'\n self.pickle_file = f'{self.recognition_folder}/encodings/{self.name}_encoding.pickle'\n\n self.__check_folders()\n\n self.table = DBFetcher(self.recognition_folder, \"db\", \"faces\").fetch()\n\n self.image_names = []\n self.refresh_image_names()\n\n self.__db_upsert()\n\n def copy_images_to(self, dest):\n \"\"\"Method to copy the images inside the dataset to the given destination folder.\n\n Args:\n dest (str): Destination folder for the images inside the dataset.\n \"\"\"\n\n if not os.path.exists(dest):\n os.mkdir(dest)\n rmtree(dest)\n\n src_files = os.listdir(self.dataset_folder)\n for file_name in src_files:\n full_file_name = os.path.join(self.dataset_folder, file_name)\n if os.path.isfile(full_file_name):\n copy(full_file_name, dest)\n\n def __db_upsert(self, force_insert=False):\n \"\"\"Function to insert(or update) the face to the database.\n\n Args:\n force_insert (bool): Force insert flag.\n\n Returns:\n str: Response.\n \"\"\"\n\n if self.table.search((Query().id == self.id)):\n if force_insert:\n # self.already_exist = False\n self.table.update({'name': self.name, 'image_names': self.image_names}, Query().id == self.id)\n\n else:\n # self.already_exist = True\n return \"Already Exist\"\n else:\n self.table.insert({\n 'id': self.id,\n 'name': self.name,\n 'image_names': self.image_names\n }) # insert the given data\n\n return \"\"\n\n def refresh_image_names(self, use_db=False):\n \"\"\"The top-level method 
to reload the image_names from given source flag.\n\n Args:\n use_db (bool): Refreshing source flag. False is for using directly by scanning dataset folder\n \"\"\"\n\n if use_db:\n face = self.table.search((Query().id == self.id))[0]\n\n self.image_names = face[\"image_names\"]\n else:\n\n self.image_names = [f for f in listdir(self.dataset_folder) if isfile(join(self.dataset_folder, f))]\n\n def delete_images(self, image_names):\n \"\"\"Method to deleting images via given image names.\n\n Args:\n image_names (list): The name list of the images those inside the dataset.\n \"\"\"\n\n for image_name in image_names:\n self.image_names.remove(image_name)\n os.remove(f'{self.dataset_folder}/{image_name}')\n\n self.__db_upsert(force_insert=True)\n\n def create_dataset_from_base_sf_photos(self, photos):\n \"\"\"Method to creating image that will be dataset for recognizing person's face later from base64 encoded string photo data.\n\n Args:\n photos (list): The person's raw photo data list. Contains list of {\"name\": \"photo_name\", \"base_sf\": \"Base64_encoded_data\"}.\n\n Returns:\n str: dataset.\n \"\"\"\n\n for photo in photos:\n with open(f'{self.dataset_folder}/{photo[\"name\"]}', \"wb\") as dataset_image:\n try:\n dataset_image.write(b64decode(photo[\"base_sf\"]))\n except binascii.Error:\n raise Exception(\"no correct base64\")\n\n self.refresh_image_names(use_db=True)\n self.__db_upsert(force_insert=True)\n\n def create_dataset_from_file_storage_object(self, photos):\n \"\"\"Method to creating image that will be dataset for recognizing person's face later from FileStorage object that is coming from HTML input form element via Flask.\n\n Args:\n photos (list): The FileStorage object list.\n\n Returns:\n str: dataset.\n \"\"\"\n\n for photo in photos:\n photo.save(f'{self.dataset_folder}/{photo.filename}')\n\n self.refresh_image_names()\n self.__db_upsert(force_insert=True)\n\n def __check_folders(self):\n \"\"\"The low-level method to checking the necessary folders created before. 
If not created creates them.\n \"\"\"\n\n if not os.path.exists(self.recognition_folder):\n os.mkdir(self.recognition_folder)\n\n if not os.path.exists(self.dataset_folder):\n os.mkdir(self.dataset_folder)\n\n def remove_self(self):\n \"\"\"Method to remove face itself.\n \"\"\"\n\n rmtree(self.dataset_folder)\n os.remove(self.pickle_file)\n\n self.table.remove((Query().id == self.id))\n","repo_name":"LookAtMe-Genius-Cameraman/T_System","sub_path":"t_system/face_encoding.py","file_name":"face_encoding.py","file_ext":"py","file_size_in_byte":17506,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"22"} +{"seq_id":"472318765","text":"from flask_restful import Resource, Api\nfrom notes import app, api\n\nimport notes.resources.Config as Config\nfrom notes.resources.Notifications \\\n import Notifications, \\\n NotificationsClear, \\\n NotificationGetter, \\\n NotificationAdder\nfrom notes.resources.Lock import Lock, Unlock\n\nclass Helper(Resource):\n def get(self):\n action_list = []\n\n with app.test_request_context():\n notifications = Notifications()\n notification_list = notifications.get()\n\n action_list.append({\n 'href':api.url_for(Helper),\n 'rel':'self',\n 'method':'GET',\n 'description':'Display routes available.'})\n action_list.append({\n 'href':api.url_for(Notifications),\n 'rel':'notifications',\n 'method':'GET',\n 'description':'Display notifications.'})\n action_list.append({\n 'href':api.url_for(NotificationAdder),\n 'rel':'notification',\n 'method':'POST',\n 'description':'Add a new notification.',\n 'comment':'{\\'note\\':\\'\\', '+ \\\n '\\'action\\':\\'\\'}'})\n\n for note in notification_list['notifications']:\n action_list.append({\n 'href':note['href'],\n 'method':['GET','PUT','DELETE'],\n 'description':'Get notification ' + str(note['id'])\n })\n# action_list.append({\n# 'href':note['href'],\n# 'method':'PUT',\n# 'description':'Update notification ' + str(note['id']),\n# 'comment':'{\\'note\\':\\'\\', '+ \\\n# '\\'action\\':\\'\\'}'\n# })\n\n action_list.append({\n 'href':api.url_for(Lock),\n 'rel':'lock',\n 'method':'GET',\n 'description':'Get device locked status.'})\n action_list.append({\n 'href':api.url_for(Lock),\n 'rel':'lock',\n 'method':'PUT',\n 'description':'Lock the device.'})\n action_list.append({\n 'href':api.url_for(Unlock, unlock_code=9999),\n 'rel':'unlock',\n 'method':'PUT',\n 'description':'Unlock the device with the code 9999.'})\n links = {'_links':action_list}\n return links\n\napi.add_resource(Notifications, '/notifications')\napi.add_resource(NotificationGetter, '/notification/')\napi.add_resource(NotificationAdder, '/notification')\napi.add_resource(NotificationsClear, '/clear')\napi.add_resource(Lock, '/lock')\napi.add_resource(Unlock, '/unlock/')\napi.add_resource(Helper, '/')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"davidjsanders/studyWork","sub_path":"stage1/old/notesuwsgi/notes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"43051844470","text":"\"\"\"This module contains the general information for EquipmentStorageLimitCap ManagedObject.\"\"\"\n\nfrom ...ucsmo import ManagedObject\nfrom ...ucscoremeta import MoPropertyMeta, MoMeta\nfrom ...ucsmeta import VersionMeta\n\n\nclass EquipmentStorageLimitCapConsts:\n MAX_LUNS_PER_DG_UNDEFINED = \"undefined\"\n ME4308_SUPPORTED_FALSE = \"false\"\n ME4308_SUPPORTED_NO = 
\"no\"\n ME4308_SUPPORTED_TRUE = \"true\"\n ME4308_SUPPORTED_YES = \"yes\"\n\n\nclass EquipmentStorageLimitCap(ManagedObject):\n \"\"\"This is EquipmentStorageLimitCap class.\"\"\"\n\n consts = EquipmentStorageLimitCapConsts()\n naming_props = set([])\n\n mo_meta = MoMeta(\"EquipmentStorageLimitCap\", \"equipmentStorageLimitCap\", \"storage-limit\", VersionMeta.Version251a, \"InputOutput\", 0x1f, [], [\"read-only\"], ['equipmentLocalDiskControllerCapProvider'], [], [\"Get\"])\n\n prop_meta = {\n \"child_action\": MoPropertyMeta(\"child_action\", \"childAction\", \"string\", VersionMeta.Version251a, MoPropertyMeta.INTERNAL, 0x2, None, None, r\"\"\"((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}\"\"\", [], []),\n \"dn\": MoPropertyMeta(\"dn\", \"dn\", \"string\", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),\n \"max_luns\": MoPropertyMeta(\"max_luns\", \"maxLuns\", \"ushort\", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),\n \"max_luns_per_dg\": MoPropertyMeta(\"max_luns_per_dg\", \"maxLunsPerDG\", \"string\", VersionMeta.Version312b, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"undefined\"], [\"0-65535\"]),\n \"me4308_supported\": MoPropertyMeta(\"me4308_supported\", \"me4308Supported\", \"string\", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, None, None, None, None, [\"false\", \"no\", \"true\", \"yes\"], []),\n \"rn\": MoPropertyMeta(\"rn\", \"rn\", \"string\", VersionMeta.Version251a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),\n \"sacl\": MoPropertyMeta(\"sacl\", \"sacl\", \"string\", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, None, None, r\"\"\"((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}\"\"\", [], []),\n \"status\": MoPropertyMeta(\"status\", \"status\", \"string\", VersionMeta.Version251a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r\"\"\"((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}\"\"\", [], []),\n }\n\n prop_map = {\n \"childAction\": \"child_action\", \n \"dn\": \"dn\", \n \"maxLuns\": \"max_luns\", \n \"maxLunsPerDG\": \"max_luns_per_dg\", \n \"me4308Supported\": \"me4308_supported\", \n \"rn\": \"rn\", \n \"sacl\": \"sacl\", \n \"status\": \"status\", \n }\n\n def __init__(self, parent_mo_or_dn, **kwargs):\n self._dirty_mask = 0\n self.child_action = None\n self.max_luns = None\n self.max_luns_per_dg = None\n self.me4308_supported = None\n self.sacl = None\n self.status = None\n\n ManagedObject.__init__(self, \"EquipmentStorageLimitCap\", parent_mo_or_dn, **kwargs)\n","repo_name":"CiscoUcs/ucsmsdk","sub_path":"ucsmsdk/mometa/equipment/EquipmentStorageLimitCap.py","file_name":"EquipmentStorageLimitCap.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"22"} +{"seq_id":"72092211257","text":"import plotly.express as px\r\nimport pandas as pd\r\nimport plotly.figure_factory as ff\r\nimport statistics\r\nimport random\r\nimport plotly.graph_objects as go\r\n\r\ndiceRes=[]\r\ndf=pd.read_csv(\"StudentsPerformance.csv\")\r\nscore=df[\"math score\"].tolist()\r\nfor i in range(0,1000):\r\n dice1=random.randint(1,6)\r\n dice2=random.randint(1,6)\r\n 
diceRes.append(dice1+dice2)\r\n\r\ns1=sum(score)\r\ns2=len(score)\r\nmean=s1/s2\r\nprint(mean)\r\nstd_deviation=statistics.stdev(score)\r\nprint(std_deviation)\r\nstd_median=statistics.median(score)\r\nstd_mode=statistics.mode(score)\r\nprint(std_median)\r\nprint(std_mode)\r\n\r\nfig=ff.create_distplot([score],[\"Student Scores\"],show_hist=False)\r\nfirst_std_start,first_std_end=mean-std_deviation,mean+std_deviation\r\nsecond_std_start,second_std_end=mean-(2*std_deviation),mean+(2*std_deviation)\r\nthird_std_start,third_std_end=mean-(3*std_deviation),mean+(2*std_deviation)\r\nlistOfOneStandardDev=[result for result in score if result>first_std_start and resultsecond_std_start and resultthird_std_start and result>> \"+reqdata)\r\n ws.send(reqdata)\r\n result = ws.recv()\r\n print(\"<<< \" + result)\r\n if result == '':\r\n return {}\r\n return json.loads(result)\r\n\r\ndef run_test(endpoint):\r\n global requestId\r\n requestId = 1\r\n\r\n try:\r\n print(\"connecting\")\r\n ws = create_connection(endpoint,sslopt={'context':my_context})\r\n print(\"connected\")\r\n\r\n try:\r\n print(\"sending initial handshake\")\r\n send_request({'action':'handshake'},ws)\r\n except Exception as e:\r\n print('Exception: ', str(e))\r\n\r\n print(\"Closing connection\")\r\n ws.close()\r\n except Exception as e:\r\n print('Exception: ', str(e))\r\n\r\nprint('Testing gamesparks')\r\nrun_test('wss://preview-v368622fnPKd.ws.gamesparks.net/ws/dedicatedServer/v368622fnPKd')\r\n\r\nprint('Testing AWS')\r\nrun_test('wss://iz0k29t5mh.execute-api.us-west-2.amazonaws.com/preview')\r\n\r\ninput(\"Press Enter to continue...\")","repo_name":"Neocore-Honya/websocket_test","sub_path":"ws_test.py","file_name":"ws_test.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"28309025095","text":"from typing import Optional, List\nimport numpy as np\nimport scipy.linalg as spl\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\nimport copy\n\nfrom ..autogluon.hp_ranges import HyperparameterRanges_CS\nfrom ..datatypes.common import CandidateEvaluation\nfrom ..datatypes.tuning_job_state import TuningJobState\nfrom ..models.gp_model import get_internal_candidate_evaluations\nfrom ..tuning_algorithms.default_algorithm import dictionarize_objective, \\\n DEFAULT_METRIC\n\n\nclass ThreeHumpCamel(object):\n @property\n def search_space(self):\n return [{'min': -5.0, 'max': 5.0},\n {'min': -5.0, 'max': 5.0}]\n\n def evaluate(self, x1, x2):\n return 2 * x1 ** 2 - 1.05 * x1 ** 4 + x1 ** 6 / 6 + x1 * x2 + x2 ** 2\n\n\ndef branin_function(x1, x2, r=6):\n return (x2 - (5.1 / (4 * np.pi ** 2)) * x1 ** 2 + (5 / np.pi) * x1 - r) ** 2 + \\\n 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10\n\n\nclass Branin(object):\n @property\n def search_space(self):\n return [{'min': -5.0, 'max': 10.0},\n {'min': 0.0, 'max': 15.0}]\n\n def evaluate(self, x1, x2):\n return branin_function(x1, x2)\n\n\nclass BraninWithR(Branin):\n def __init__(self, r):\n self.r = r\n\n def evaluate(self, x1, x2):\n return branin_function(x1, x2, r=self.r)\n\n\nclass Ackley(object):\n @property\n def search_space(self):\n const = 32.768\n return [{'min': -const, 'max': const},\n {'min': -const, 'max': const}]\n\n def evaluate(self, x1, x2):\n a = 20\n b = 0.2\n c = 2 * np.pi\n ssq = (x1 ** 2) + (x2 ** 2)\n scos = np.cos(c * x1) + np.cos(c * x2)\n return -a * np.exp(-b * np.sqrt(0.5 * ssq)) - np.exp(0.5 * scos) + \\\n (a + np.exp(1))\n\n\nclass 
SimpleQuadratic(object):\n @property\n def search_space(self):\n return [{'min': 0.0, 'max': 1.0},\n {'min': 0.0, 'max': 1.0}]\n\n def evaluate(self, x1, x2):\n return 2 * (x1 - 0.5)**2 + (x2 - 0.5)**2\n\n\ndef _decode_input(x, lim):\n mn, mx = lim['min'], lim['max']\n return x * (mx - mn) + mn\n\n\ndef evaluate_blackbox(bb_func, inputs: np.ndarray) -> np.ndarray:\n num_dims = inputs.shape[1]\n input_list = []\n for x, lim in zip(np.split(inputs, num_dims, axis=1), bb_func.search_space):\n input_list.append(_decode_input(x, lim))\n return bb_func.evaluate(*input_list)\n\n\n# NOTE: Inputs will always be in [0, 1] (so come in encoded form). They are\n# only scaled to their native ranges (linearly) when evaluations of the\n# blackbox are done. This avoids silly errors.\ndef sample_data(\n bb_cls, num_train: int, num_grid: int,\n expand_datadct: bool = True) -> dict:\n bb_func = bb_cls()\n ss_limits = bb_func.search_space\n num_dims = len(ss_limits)\n # Sample training inputs\n train_inputs = np.random.uniform(\n low=0.0, high=1.0, size=(num_train, num_dims))\n # Training targets (currently, no noise is added)\n train_targets = evaluate_blackbox(bb_func, train_inputs).reshape((-1,))\n # Inputs for prediction (regular grid)\n grids = [np.linspace(0.0, 1.0, num_grid)] * num_dims\n grids2 = tuple(np.meshgrid(*grids))\n test_inputs = np.hstack([x.reshape(-1, 1) for x in grids2])\n # Also evaluate true function on grid\n true_targets = evaluate_blackbox(bb_func, test_inputs).reshape((-1,))\n data = {\n 'ss_limits': ss_limits,\n 'train_inputs': train_inputs,\n 'train_targets': train_targets,\n 'test_inputs': test_inputs,\n 'grid_shape': grids2[0].shape,\n 'true_targets': true_targets}\n if expand_datadct:\n # Make sure that ours and GPy below receive exactly the same inputs\n data = expand_data(data)\n return data\n\n\ndef expand_data(data: dict) -> dict:\n \"\"\"\n Appends derived entries to data dict, which have non-elementary types.\n \"\"\"\n if 'state' not in data:\n data = copy.copy(data)\n state = data_to_state(data)\n data_internal = get_internal_candidate_evaluations(\n state, active_metric=DEFAULT_METRIC, normalize_targets=True,\n num_fantasize_samples=20)\n data['state'] = state\n data['train_inputs'] = data_internal.X\n data['train_targets_normalized'] = data_internal.y\n return data\n\n\n# Recall that inputs in data are encoded, so we have to decode them to their\n# native ranges for candidate_evaluations\ndef data_to_state(data: dict) -> TuningJobState:\n configs, cs = decode_inputs(data['train_inputs'], data['ss_limits'])\n _evaluations = [\n CandidateEvaluation(config, dictionarize_objective(y))\n for config, y in zip(configs, data['train_targets'])]\n return TuningJobState(\n hp_ranges=HyperparameterRanges_CS(cs),\n candidate_evaluations=_evaluations,\n failed_candidates=[],\n pending_evaluations=[])\n\n\ndef decode_inputs(inputs: np.ndarray, ss_limits) -> \\\n (List[CS.Configuration], CS.ConfigurationSpace):\n cs = CS.ConfigurationSpace()\n cs_names = ['x{}'.format(i) for i in range(len(ss_limits))]\n cs.add_hyperparameters([\n CSH.UniformFloatHyperparameter(\n name=name, lower=lims['min'], upper=lims['max'])\n for name, lims in zip(cs_names, ss_limits)])\n x_mult = []\n x_add = []\n for lim in ss_limits:\n mn, mx = lim['min'], lim['max']\n x_mult.append(mx - mn)\n x_add.append(mn)\n x_mult = np.array(x_mult)\n x_add = np.array(x_add)\n configs = []\n for x in inputs:\n x_decoded = x * x_mult + x_add\n config_dct = dict(zip(cs_names, x_decoded))\n 
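# Worked example: with limits {'min': -5.0, 'max': 10.0} (Branin's x1), an\n        # encoded x = 0.5 decodes to 0.5 * (10.0 - (-5.0)) + (-5.0) = 2.5.\n        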
configs.append(CS.Configuration(cs, values=config_dct))\n return configs, cs\n\n\ndef assert_equal_candidates(candidates1, candidates2, hp_ranges, decimal=5):\n inputs1 = hp_ranges.to_ndarray_matrix(candidates1)\n inputs2 = hp_ranges.to_ndarray_matrix(candidates2)\n np.testing.assert_almost_equal(inputs1, inputs2, decimal=decimal)\n\n\ndef assert_equal_randomstate(randomstate1, randomstate2):\n assert str(randomstate1.get_state()) == str(randomstate2.get_state())\n\n\ndef compare_gpy_predict_posterior_marginals(\n test_intermediates: dict, noise_variance_gpy: Optional[float] = None):\n \"\"\"\n Compares all intermediates of cholesky_computations and\n predict_posterior_marginals to using GPy and NumPy.\n\n Currently, this is restricted:\n - Kernel must be Matern52 with ARD\n - Mean function must be constant 0\n\n :param test_intermediates: Intermediates computed using our code\n :param noise_variance_gpy: Overrides noise_variance in test_intermediates.\n Use this if jitter was added during the posterior state computation.\n\n \"\"\"\n import GPy\n # Create GPy kernel and model\n num_data = test_intermediates['features'].shape[0]\n num_dims = test_intermediates['features'].shape[1]\n lengthscales = [\n 1.0 / test_intermediates['inv_bw{}'.format(i)]\n for i in range(num_dims)]\n kernel = GPy.kern.Matern52(\n num_dims,\n variance=test_intermediates['covariance_scale'],\n lengthscale=lengthscales,\n ARD=True)\n if noise_variance_gpy is None:\n noise_variance_gpy = test_intermediates['noise_variance']\n model = GPy.models.GPRegression(\n test_intermediates['features'],\n test_intermediates['targets'].reshape((-1, 1)),\n kernel=kernel, noise_var=noise_variance_gpy)\n # Compare intermediates step by step (cholesky_computations)\n kernel_mat_gpy = kernel.K(test_intermediates['features'], X2=None)\n np.testing.assert_almost_equal(\n test_intermediates['kernel_mat'], kernel_mat_gpy, decimal=5)\n sys_mat_gpy = kernel_mat_gpy + np.diag(np.ones(num_data)) * \\\n noise_variance_gpy\n np.testing.assert_almost_equal(\n test_intermediates['sys_mat'], sys_mat_gpy, decimal=5)\n chol_fact_gpy = spl.cholesky(sys_mat_gpy, lower=True)\n # Use test_intermediates['sys_mat'] instead:\n #chol_fact_gpy = spl.cholesky(test_intermediates['sys_mat'], lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['chol_fact'], chol_fact_gpy, decimal=4)\n # Mean function must be constant 0\n centered_y = test_intermediates['targets'].reshape((-1, 1))\n np.testing.assert_almost_equal(\n test_intermediates['centered_y'], centered_y, decimal=9)\n pred_mat_gpy = spl.solve_triangular(chol_fact_gpy, centered_y, lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['pred_mat'], pred_mat_gpy, decimal=3)\n # Compare intermediates step by step (predict_posterior_marginals)\n k_tr_te_gpy = kernel.K(test_intermediates['features'],\n X2=test_intermediates['test_features'])\n np.testing.assert_almost_equal(\n test_intermediates['k_tr_te'], k_tr_te_gpy, decimal=5)\n linv_k_tr_te_gpy = spl.solve_triangular(chol_fact_gpy, k_tr_te_gpy, lower=True)\n np.testing.assert_almost_equal(\n test_intermediates['linv_k_tr_te'], linv_k_tr_te_gpy, decimal=4)\n pred_means_gpy = np.dot(linv_k_tr_te_gpy.T, pred_mat_gpy)\n np.testing.assert_almost_equal(\n test_intermediates['pred_means'], pred_means_gpy, decimal=4)\n k_tr_diag_gpy = kernel.Kdiag(\n test_intermediates['test_features']).reshape((-1,))\n tvec_gpy = np.sum(np.square(linv_k_tr_te_gpy), axis=0).reshape((-1,))\n pred_vars_gpy = k_tr_diag_gpy - tvec_gpy\n 
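# Posterior-variance identity: with v = L^{-1} k_*, the quantity subtracted\n    # from k(x_*, x_*) is ||v||^2 = k_*^T (K + noise_variance * I)^{-1} k_*.\n    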
np.testing.assert_almost_equal(\n test_intermediates['pred_vars'], pred_vars_gpy, decimal=4)\n # Also test against GPy predict\n pred_means_gpy2, pred_vars_gpy2 = model.predict(\n test_intermediates['test_features'], include_likelihood=False)\n pred_vars_gpy2 = pred_vars_gpy2.reshape((-1,))\n np.testing.assert_almost_equal(pred_means_gpy, pred_means_gpy2, decimal=3)\n np.testing.assert_almost_equal(pred_vars_gpy, pred_vars_gpy2, decimal=3)\n","repo_name":"submission001/anonymoussubmission_automl","sub_path":"autogluon/core/src/autogluon/core/searcher/bayesopt/utils/comparison_gpy.py","file_name":"comparison_gpy.py","file_ext":"py","file_size_in_byte":9851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"33885521524","text":"from base64 import b64encode\nimport requests \nimport json\n\nAPI_KEY = \"AIzaSyBRcVPi1HMQBQeffz4wIn92kdBNNZ0wXpM\"\nURL = \"https://vision.googleapis.com/v1/images:annotate?key={}\".format(API_KEY)\n\ndef make_request_json(buffer):\n image_json = {\n 'content': b64encode(buffer).decode('UTF-8')\n }\n\n feature_json = {\n \"type\": \"LABEL_DETECTION\",\n \"maxResults\": \"10\"\n }\n\n request = {\n \"requests\": {\n \"image\": image_json,\n \"features\": feature_json\n }\n }\n\n return json.dumps(request)\n\ndef send_request(request_json):\n response = requests.post(url=URL,\n data=request_json,\n headers={\"Content-Type\": \"application/json\"}) \n response_dict = json.loads(response.text)\n\n return response_dict[\"responses\"][0][\"labelAnnotations\"]\n\ndef determine_trash(labelAnnotations):\n for labelAnnotation in labelAnnotations:\n description = labelAnnotation[\"description\"]\n if \"bottle\" in description or \"plastic\" in description:\n return \"bottle\"\n elif \"can\" in description:\n return \"can\"\n","repo_name":"universse/recycleable","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"41685741419","text":"import requests\nfrom lxml import etree\nimport re\nimport streamlit as st\ndef get_viedo(bilibili_url):\n with st.spinner('Wait for it...'):\n url=bilibili_url\n try:\n st.header('下面是该视频的音频和视频')\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36 Edg/105.0.1343.53',\n 'cookie':\"buvid3=E8854EBD-E4BF-04B5-29C7-D9C2F1556CBD36385infoc; i-wanna-go-back=-1; _uuid=10CD96C36-B72F-92EF-3387-E2D7C1104E10EC37078infoc; buvid_fp=02b331c2843088f1f0807f484002d8e5; DedeUserID=1482172628; DedeUserID__ckMd5=e0a0fde62d6df8d1; LIVE_BUVID=AUTO9916622943658682; nostalgia_conf=-1; CURRENT_BLACKGAP=0; rpdid=|(J|~YkYJYR~0J'uYYkkmRYl|; b_ut=5; is-2022-channel=1; fingerprint3=422debf2fe4e989460ab9d489e170256; hit-dyn-v2=1; b_nut=100; blackside_state=1; buvid4=A2624E58-6142-0769-6562-952F0BC4C2CA37882-022090420-qz8RgjSxjvRz0D%2FGMBrHuw%3D%3D; fingerprint=2eee4eaff4ec547d4cb609306f4ca5f9; CURRENT_QUALITY=80; SESSDATA=d5481655%2C1680267023%2C28b64%2Aa1; bili_jct=9e3a77dab6036ae77211714e6a06aa53; sid=808exllu; CURRENT_FNVAL=4048; b_lsid=C8A104A810_1839D719071; theme_style=light; bp_video_offset_1482172628=712769644222480400; PVID=5\",\n 'referer': 'https://www.bilibili.com/video/BV1m54y1Y7nR/?spm_id_from=autoNext&vd_source=9c73a36d562e50cb87942b740ace868b'\n\n }\n response = requests.get(url=url,headers=headers)\n data = response.text\n html_obj = etree.HTML(data)\n 
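# The page embeds stream metadata as JSON in a window.__playinfo__ script tag;\n            # the XPath/regex below pull the first video and audio baseUrl out of it.\n            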
title=html_obj.xpath('//div/div//h1/@title')[0]\n url_str=html_obj.xpath('//script[contains(text(),\"window.__playinfo__\")]/text()') [0]\n video_url=re.findall(r'\"video\":\\[{\"id\":\\d+,\"baseUrl\":\"(.*?)\",',url_str)[0]\n audio_url=re.findall(r'\"audio\":\\[{\"id\":\\d+,\"baseUrl\":\"(.*?)\",',url_str)[0]\n response_video_url=requests.get(url=video_url,headers=headers)\n response_audio_url=requests.get(url=audio_url,headers=headers)\n data_video_url=response_video_url.content\n data_audio_url=response_audio_url.content\n with open (f\"./视频音频/{title}.mp4\",'wb') as f:\n f.write(data_video_url)\n with open(f\"./视频音频/{title}.mp3\", 'wb') as f:\n f.write(data_audio_url)\n # audio = ffmpeg.input(f\"./视频音频/{title}.mp3\")\n # video = ffmpeg.input(f\"./视频音频/{title}.mp4\")\n # out = ffmpeg.output(video, audio,f\"./视频音频/{title}_finally.mp4\")\n # out.run()\n # os.system(f'./ffmpeg.exe -i \"./视频音频/{title}.mp4\" -i \"./视频音频/{title}.mp3\" -c copy \"./视频音频/{title}_finally.mp4\" ')\n\n audio_file = open(f\"./视频音频/{title}.mp3\", 'rb')\n audio_bytes = audio_file.read()\n st.video(audio_bytes)\n with open(f\"./视频音频/{title}.mp3\", \"rb\") as file:\n btn = st.download_button(\n label=\"你可以点击这个按钮将这个音频下载下来\",\n data=file,\n file_name=\"视频.mp3\",\n mime=\"mp3\"\n )\n video_file = open(f\"./视频音频/{title}.mp4\", 'rb')\n video_bytes = video_file.read()\n st.video(video_bytes)\n with open(f\"./视频音频/{title}.mp4\", \"rb\") as file:\n btn = st.download_button(\n label=\"你可以点击这个按钮将这个视频下载下来\",\n data=file,\n file_name=\"视频.mp4\",\n mime=\"mp4/avi\"\n )\n\n os.remove(f\"./视频音频/{title}.mp4\")\n os.remove(f\"./视频音频/{title}.mp3\")\n except :\n pass\n","repo_name":"liwenkai-source/my_streamlit_app","sub_path":"my_module/get_viedo.py","file_name":"get_viedo.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"41532551306","text":"class Solution:\n def mergeSimilarItems(self, items1: list[list[int]], items2: list[list[int]]) -> list[list[int]]:\n total = {}\n for item in items1:\n if item[0] not in total:\n total[item[0]] = item[1]\n else:\n total[item[0]] += item[1]\n for item in items2:\n if item[0] not in total:\n total[item[0]] = item[1]\n else:\n total[item[0]] += item[1]\n return sorted([[key, total[key]] for key in total], key=lambda x:x[0])\n\ns = Solution()\nres = s.mergeSimilarItems([[1,1],[4,5],[3,8]],[[3,1],[1,5]])\nprint(res)\nres = s.mergeSimilarItems([[1,1],[3,2],[2,3]],[[2,1],[3,2],[1,3]])\nprint(res)\nres = s.mergeSimilarItems([[1,3],[2,2]],[[7,1],[2,2],[1,4]])\nprint(res)","repo_name":"BobbyRobillard/CodingChallenges","sub_path":"LeetCode/BiWeeklyCompetitions/Bi-Weekly-84/6141-Merge-Similar.py","file_name":"6141-Merge-Similar.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2728725729","text":"'''\r\nWrite a program that accepts a sequence of whitespace separated words\r\nas input and prints the words after removing all duplicate words\r\nand sorting them alphanumerically.\r\nSuppose the following input is supplied to the program:\r\nhello world and practice makes perfect and hello world again\r\nThen, the output should be: again and hello makes perfect practice world\r\n'''\r\n\r\ntext=input(\"Enter text: \")\r\nwords=text.split(\" \")\r\n#print(words)\r\n\r\nunique_words=[]\r\nfor word in words:\r\n if word not in unique_words:\r\n 
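# NOTE: 'word not in unique_words' scans a list, O(n) per check; a set\r\n        # (or dict.fromkeys(words)) would make the whole deduplication linear.\r\n        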
unique_words.append(word)\r\n\r\nunique_words.sort(reverse=False)\r\nfor word in unique_words:\r\n print(word,end=\" \")","repo_name":"patilvikas0205/python-Exercise","sub_path":"Assignment_No_5.py","file_name":"Assignment_No_5.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"11447152833","text":"# 1\nfor i in range(10):\n print(i+1)\n\n# 2\ninput_string = input(\"Введите строку \")\nfor symbol in input_string:\n print(symbol)\n\n# 3\n\n\ndef get_mid_mark(school_marks):\n middle_mark_school = 0\n class_middle_marks = []\n for school_class_scores in school_marks:\n class_mid_mark = sum(school_class_scores['scores'])/\\\n len(school_class_scores['scores'])\n class_middle_marks.append({\"name\": school_class_scores[\"name\"],\n \"mid_mark\": class_mid_mark})\n middle_mark_school += class_mid_mark\n return class_middle_marks, middle_mark_school/len(school_marks)\n\n\nschool_scores = [{'name': '4a', 'scores': [1, 1, 2, 5, 3]},\n {'name': '4б', 'scores': [5, 3, 5, 2, 5]},\n {'name': '4в', 'scores': [5, 4, 5, 5, 2, 2, 5]},\n ]\n\nclasses_mid_marks, mid_mark_school = get_mid_mark(school_scores)\n\nprint(classes_mid_marks)\nprint(mid_mark_school)\n","repo_name":"ZDaria/learn_python","sub_path":"homeworks/week_2/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"9215999584","text":"import gensim\nimport matplotlib.pyplot as plt\nimport pickle\nimport numpy as np\nfrom scipy.interpolate import spline\n\n#load pickle\npickle_in = open(\"pickle_data.pickle\",\"rb\")\np_dict = pickle.load(pickle_in)\npickle_in.close()\n\ndf9_yrs = p_dict['df9_yrs']\ndistinct_yrs = p_dict['distinct_yrs']\ncorpus = p_dict['corpus']\n\n\n# returns\n# topic_yr_map = {1987: [23.477306301575261, 17.74636315217273, 26.847394613504079, 11.997810091403467, 6.4015614721370984,\n# doc_yr_count = {1987: 90, 1988: 94, 1989: 101, 1990: 143, 1991: 144, 1992: 127, 1993: 158,\ndef avg_topic_probability():\n for i in range(len(corpus)):\n text = corpus[i]\n yr = df9_yrs[i]\n\n topic_prob = model[corpus[i]]\n doc_yr_count[yr] += 1\n\n cnt = 0\n for x in topic_prob:\n topic_yr_map[yr][cnt] += x[1]\n cnt += 1\n return topic_yr_map, doc_yr_count\n\ndef choose_max_prob_topic():\n for i in range(len(corpus)):\n text = corpus[i]\n yr = df9_yrs[i]\n\n topic_prob = model[corpus[i]]\n doc_yr_count[yr] += 1\n\n # max_tuple = max(topic_prob,key=lambda item:item[1])\n max_tuple = max(topic_prob)\n topic_yr_map[yr][max_tuple[0]] += 1\n\n\n return topic_yr_map, doc_yr_count\n\n# returns {1987: [2.6085895890639179, 1.9718181280191922, 2.983043845944898, 1.3330900101559409, 0.71128460801523308,...\ndef normalize_for_viz(x,y,yrs):\n for yr in yrs:\n temp = []\n for i in y[yr]:\n temp.append(i/ x[yr])\n # temp.append(i * 10 / x[yr])\n y[yr] = temp\n return y\n\n#a = topic_yr_map\n#x = distinct_yrs\ndef viz_tot(a, x): # a = yr_topic_map\n a_vals = list(a.values())\n y = list(zip(*a_vals))\n\n xnew = np.linspace(min(x),max(x),200) #300 represents number of points to make between T.min and T.max\n\n for i in range(0,10):\n power_smooth = spline(x,y[i],xnew)\n plt.plot(xnew,power_smooth, label=\"Topic\"+str(i))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.show()\n\n\n# Load model\nmodel = gensim.models.ldamodel.LdaModel.load('output/model.atmodel')\n\n# Initialization : topic prob distribution for 
each year\n# 1987:[0....0], 1988:[0,..0],\ntopic_yr_map = {}\ndoc_yr_count = {}\nfor i in distinct_yrs:\n # topic_yr_map[i] = [0]*len(distinct_yrs)\n topic_yr_map[i] = [0]*10\n doc_yr_count[i] = 0\n\n\n# topic_yr_map, doc_yr_count = avg_topic_probability() # TOT - 1\ntopic_yr_map, doc_yr_count = choose_max_prob_topic() # TOT - 2\ntopic_yr_map = normalize_for_viz(doc_yr_count, topic_yr_map, distinct_yrs)\n\nviz_tot(topic_yr_map, distinct_yrs)\n\n# for Prediction.py\ntopic_yr_vals = {}\nfor topic in range(10):\n temp = []\n for yr in distinct_yrs: #for each year\n temp.append(topic_yr_map[yr][topic])\n topic_yr_vals[topic] = temp\n\n\n# pickle dump\np_dict['topic_yr_map'] = topic_yr_map\np_dict['doc_yr_count'] = doc_yr_count\np_dict['topic_yr_vals'] = topic_yr_vals\npickle_out = open(\"pickle_data.pickle\", \"wb\")\npickle.dump(p_dict, pickle_out)\npickle_out.close()","repo_name":"SomGitHub2018/Topic-model-analysis-NIPS-dataset","sub_path":"Topic Evolution.py","file_name":"Topic Evolution.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"7529517535","text":"class Solution:\n def isRectangleCover(self, rectangles):\n \"\"\"\n :type rectangles: List[List[int]]\n :rtype: bool\n \"\"\"\n def count_points(coor, mask):\n if coor not in m:\n m[coor] = 0\n if m[coor] & mask != 0:\n return False\n m[coor] |= mask\n return True\n\n m = {}\n lx, ly, rx, ry = 2147483647, 2147483647, 0, 0\n area = 0\n for rect in rectangles:\n lx = min(lx, rect[0])\n ly = min(ly, rect[1])\n rx = max(rx, rect[2])\n ry = max(ry, rect[3])\n area += (rect[2] - rect[0]) * (rect[3] - rect[1])\n if not count_points((rect[0], rect[1]), 1):\n return False\n if not count_points((rect[0], rect[3]), 2):\n return False\n if not count_points((rect[2], rect[3]), 4):\n return False\n if not count_points((rect[2], rect[1]), 8):\n return False\n cnt = 0\n for v in m.values():\n if v == 1 or v == 2 or v == 4 or v == 8:\n cnt += 1\n return cnt == 4 and area == (rx - lx) * (ry - ly)\n\n\nif __name__ == '__main__':\n n = int(input())\n rectangles = []\n for i in range(n):\n rectangles.append([int(num) for num in input().split()])\n solution = Solution()\n print(solution.isRectangleCover(rectangles))\n","repo_name":"jiangshen95/UbuntuLeetCode","sub_path":"PerfectRectangle.py","file_name":"PerfectRectangle.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"23927232899","text":"import numpy as np\nimport scipy.linalg as la\n\nfrom ..algorithm import SampleProcessAlgorithm\nfrom ..evaluator import Evaluator\nfrom ..problem import Problem\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass SensitivityAlgorithm(SampleProcessAlgorithm):\n def __init__(self, problem: Problem, step_lengths, steps):\n # Problem\n self.problem = problem\n self.initial = self._require_initial_values(self.problem.get_parameters())\n\n dimensions = self._get_dimensions(problem)\n\n assert len(steps) == dimensions\n assert len(step_lengths) == dimensions\n\n # Algorithm settings\n self.steps = steps\n self.step_lengths = step_lengths\n self.dimensions = dimensions\n\n # Algorithm state\n self.done = False\n\n def set_state(self, state):\n pass\n\n def get_state(self):\n return {\n \"done\": done\n }\n\n def get_settings(self):\n return {\n \"step_lengths\": self.step_lengths,\n \"steps\": self.steps,\n }\n\n def sample(self):\n if self.done: return []\n\n values = 
np.repeat([self.initial], 1 + np.sum(self.steps) * 2, axis = 0)\n\n # First is base configuration\n k = 1\n\n for d in range(self.dimensions):\n sigma = self.step_lengths[d]\n steps = self.steps[d]\n\n offsets = np.linspace(0.0, sigma * steps, steps + 1)\n\n for offset in offsets:\n values[k + 0, d] += offset\n values[k + 1, d] -= offset\n k += 2\n\n return values\n\n def process(self, evaluations):\n self.done = True\n","repo_name":"sebhoerl/boptx","sub_path":"src/boptx/algorithms/sensitivity.py","file_name":"sensitivity.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"74812546295","text":"\nimport numpy as np\nfrom tvb.simulator.lab import connectivity\n\n\nctb_folder = \"E:\\\\LCCN_Local\\PycharmProjects\\CTB_data3\\\\\"\n\nsubj_ids = [35, 49, 50, 58, 59, 64, 65, 71, 75, 77]\nsubjects = [\"NEMOS_0\" + str(id) for id in subj_ids]\n\n# Define regions implicated in Functional analysis: remove Cerebelum, Thalamus, Caudate (i.e. subcorticals)\ncortical_rois = ['Precentral_L', 'Precentral_R', 'Frontal_Sup_2_L',\n 'Frontal_Sup_2_R', 'Frontal_Mid_2_L', 'Frontal_Mid_2_R',\n 'Frontal_Inf_Oper_L', 'Frontal_Inf_Oper_R', 'Frontal_Inf_Tri_L',\n 'Frontal_Inf_Tri_R', 'Frontal_Inf_Orb_2_L', 'Frontal_Inf_Orb_2_R',\n 'Rolandic_Oper_L', 'Rolandic_Oper_R', 'Supp_Motor_Area_L',\n 'Supp_Motor_Area_R', 'Olfactory_L', 'Olfactory_R',\n 'Frontal_Sup_Medial_L', 'Frontal_Sup_Medial_R',\n 'Frontal_Med_Orb_L', 'Frontal_Med_Orb_R', 'Rectus_L', 'Rectus_R',\n 'OFCmed_L', 'OFCmed_R', 'OFCant_L', 'OFCant_R', 'OFCpost_L',\n 'OFCpost_R', 'OFClat_L', 'OFClat_R', 'Insula_L', 'Insula_R',\n 'Cingulate_Ant_L', 'Cingulate_Ant_R', 'Cingulate_Mid_L',\n 'Cingulate_Mid_R', 'Cingulate_Post_L', 'Cingulate_Post_R',\n 'Hippocampus_L', 'Hippocampus_R', 'ParaHippocampal_L',\n 'ParaHippocampal_R', 'Calcarine_L',\n 'Calcarine_R', 'Cuneus_L', 'Cuneus_R', 'Lingual_L', 'Lingual_R',\n 'Occipital_Sup_L', 'Occipital_Sup_R', 'Occipital_Mid_L',\n 'Occipital_Mid_R', 'Occipital_Inf_L', 'Occipital_Inf_R',\n 'Fusiform_L', 'Fusiform_R', 'Postcentral_L', 'Postcentral_R',\n 'Parietal_Sup_L', 'Parietal_Sup_R', 'Parietal_Inf_L',\n 'Parietal_Inf_R', 'SupraMarginal_L', 'SupraMarginal_R',\n 'Angular_L', 'Angular_R', 'Precuneus_L', 'Precuneus_R',\n 'Paracentral_Lobule_L', 'Paracentral_Lobule_R', 'Heschl_L', 'Heschl_R',\n 'Temporal_Sup_L', 'Temporal_Sup_R', 'Temporal_Pole_Sup_L',\n 'Temporal_Pole_Sup_R', 'Temporal_Mid_L', 'Temporal_Mid_R',\n 'Temporal_Pole_Mid_L', 'Temporal_Pole_Mid_R', 'Temporal_Inf_L',\n 'Temporal_Inf_R']\n\n\ncorrs = []\nfor emp_subj in subjects:\n\n conn = connectivity.Connectivity.from_file(ctb_folder + emp_subj + \"_AAL2_pass.zip\")\n\n # load text with FC rois; check if match SC\n FClabs = list(np.loadtxt(ctb_folder + \"FCavg_\" + emp_subj + \"/roi_labels.txt\", dtype=str))\n FC_cortex_idx = [FClabs.index(roi) for roi in\n cortical_rois] # find indexes in FClabs that matches cortical_rois\n SClabs = list(conn.region_labels)\n SC_cortex_idx = [SClabs.index(roi) for roi in cortical_rois]\n\n sc_emp = conn.weights[:, SC_cortex_idx][SC_cortex_idx]\n\n # Load empirical data to make simple comparisons\n plv_emp = \\\n np.loadtxt(ctb_folder + \"FCavg_\" + emp_subj + \"/3-alpha_plv_avg.txt\", delimiter=',')[:,\n FC_cortex_idx][\n FC_cortex_idx]\n\n t3 = np.zeros(shape=(2, len(plv_emp) ** 2 // 2 - len(plv_emp) // 2))\n t3[0, :] = plv_emp[np.triu_indices(len(plv_emp), 1)]\n t3[1, :] = sc_emp[np.triu_indices(len(plv_emp), 1)]\n sc_r = 
np.corrcoef(t3)[0, 1]\n\n corrs.append(sc_r)\n\nnp.average(corrs)","repo_name":"jescab01/thalamusResearch","sub_path":"PAPER2/R1_TH-type&noise/SC-FCemp_corr.py","file_name":"SC-FCemp_corr.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"3893210593","text":"import csv\n\nelection_years = ['1998', '2001', '2005', '2007', '2015', '2019', '2022']\n\nparty_to_party_letter = {\n 'Socialdemokratiet': 'A',\n 'Radikale Venstre': 'B',\n 'Det Konservative Folkeparti': 'C',\n 'Nye Borgerlige': 'D',\n 'Socialistisk Folkeparti': 'F',\n 'Liberal Alliance': 'I',\n 'Moderaterne': 'M',\n 'Dansk Folkeparti': 'O',\n 'Frie Grønne': 'Q',\n 'Venstre': 'V',\n 'Danmarksdemokraterne': 'Æ',\n 'Enhedslisten': 'Ø',\n 'Alternativet': 'Å',\n 'Løsgænger': 'UFG'\n}\n\nparty_renamings = {\n 'Ny Alliance': 'Liberal Alliance',\n 'Det Radikale Venstre': 'Radikale Venstre'\n}\n\nparties_in_parlament = party_to_party_letter.keys()\n\ndef find_parties():\n parties = set()\n for election_year in election_years:\n with open('elected-members/' + election_year + '.csv', 'r', newline='') as file:\n elected_members_reader = csv.reader(file, delimiter=',')\n\n # Skip the headers\n next(elected_members_reader, None)\n\n for elected_member in elected_members_reader:\n parties.add(elected_member[1])\n\n print(parties)\n\n#find_parties()\n\ndef initialize_transfer_counter(party_letter):\n return {letter: 0 for letter in party_to_party_letter.values() if letter is not party_letter}\n\ndef lookup_party_name(party):\n return party if party_renamings.get(party) is None else party_renamings.get(party)\n\ndef find_party_transfer():\n transfers = {letter: initialize_transfer_counter(letter) for letter in party_to_party_letter.values()}\n for election_year in election_years:\n with open('party-transfer/' + election_year + '.csv', 'r', newline='') as file:\n party_transfer_reader = csv.reader(file, delimiter=',')\n\n # Skip the headers\n next(party_transfer_reader, None)\n\n for transfer in party_transfer_reader:\n old_party_letter = party_to_party_letter.get(lookup_party_name(transfer[1]))\n new_party_letter = party_to_party_letter.get(lookup_party_name(transfer[2]))\n\n if old_party_letter is None or new_party_letter is None:\n continue\n\n party_transfers = transfers.get(old_party_letter)\n party_transfer = party_transfers.get(new_party_letter)\n party_transfers[new_party_letter] = party_transfer + 1\n\n print(transfers)\n\nfind_party_transfer()\n\n","repo_name":"Baizley/party-transfers","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"72613388856","text":"# coding=utf-8\n# given a list of new url, download to path\n\nimport sys,os,argparse,scrapy,io\nfrom scrapy.crawler import CrawlerProcess\nimport logging\n\n\ndef get_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"newslst\")\n\tparser.add_argument(\"outpath\")\n\treturn parser.parse_args()\n\n\ndef process_news(response,newsname):\n\tproc = {\n\t# using xpath selector instead of css since need all children's text\n\t\t\"cnn\":'//div[contains(@class,\"zn-body__paragraph\")]//text()',\n\t\t\"businessinsider\": 
'//div[contains(@class,\"slide-module\")]/p//text()',\n\t\t\"fox\":'//div[contains(@class,\"article-body\")]/p//text()',\n\t\t\"latimes\":'//div[contains(@class,\"trb_ar_page\")]/p//text()',\n\t\t\"huffingtonpost\":'//div[contains(@class,\"content-list-component bn-content-list-text yr-content-list-text text\")]/p//text()',\n\t\t\"reuters\":'//div[contains(@class,\"StandardArticleBody_body_1gnLA\")]/p//text()',\n\t\t\"usatoday\":'//div[contains(@class,\"asset-double-wide double-wide p402_premium\")]/p[contains(@class,\"p-text\")]//text()',\n\t\t\"bbc\":'//div[contains(@class,\"story-body__inner\")]/p//text()',\n\t\t\"billboard\":'//div[contains(@class,\"article__body js-fitvids-content\")]/p//text()'\n\t}\n\ttexts = []\n\t#for elem in response.css(proc[newsname]).extract():\n\tfor elem in response.xpath(proc[newsname]).extract():\n\t\t#print elem\n\t\ttexts.append(elem)\n\treturn texts\n\nclass NewsSpider(scrapy.Spider):\n\tname = \"news\"\n\tdef __init__(self,news=None,outpath=None,*args,**kwargs):\n\t\t# news should be a (name,url) tuple\n\t\tsuper(NewsSpider,self).__init__(*args,**kwargs)\n\t\tself.news = news\n\t\tself.outpath = outpath\n\n\tdef start_requests(self):\n\n\t\tfor name,url in self.news:\n\t\t\tyield scrapy.Request(url,callback=self.parse,meta={\"name\":name}) # use meta to pass args to parse function\n\n\tdef parse(self,response):\n\t\tnewsname = response.meta['name']\n\t\ttexts = process_news(response,newsname)\n\t\tf = io.open(os.path.join(self.outpath,\"%s.txt\"%newsname),\"w\",encoding=\"utf-8\")\n\t\tfor text in texts:\n\t\t\tf.writelines(\"%s\\n\"%(text))\n\t\tf.close()\n\t\t\n\n\n\nif __name__ == \"__main__\":\n\targs = get_args()\n\tnews = [line.strip().split() for line in open(args.newslst,\"r\").readlines()]\n\n\tif not os.path.exists(args.outpath):\n\t\tos.makedirs(args.outpath)\n\n\tprocess = CrawlerProcess({\n\t 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n\t})\n\n\tlogging.getLogger('scrapy').setLevel(logging.ERROR)\n\n\tprocess.crawl(NewsSpider,news=news,outpath=args.outpath) # pass args for NewsSpider here, stupid scrapy\n\tprocess.start()\n\t\n","repo_name":"JunweiLiang/visual_twitter","sub_path":"script/get_news.py","file_name":"get_news.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"20306116383","text":"#!/usr/bin/env python3\nimport subprocess\nfrom tkinter import Entry, Button, Tk, Label, StringVar\nfrom common import DEFAULT_IP_ADDRESS, DEFAULT_PORT\n\nprocesses = []\nclients = [\n \"Рос\",\n \"Чендлер\",\n \"Джо\",\n \"Рейчел\",\n \"Моника\",\n \"Фиби\"\n]\n\n\nclass Launcher:\n def __init__(self, master):\n self.master = master\n self.master.title('Launcher')\n self.transport = None\n\n self.host = StringVar()\n self.port = StringVar()\n self.clients = StringVar()\n self.msg_text = StringVar()\n self.msg_client = StringVar()\n\n self.host_label = Label(self.master, text='host:')\n self.port_label = Label(self.master, text='port:')\n self.clients_label = Label(self.master, text='clients:')\n self.port_entry = Entry(textvariable=self.port)\n self.clients_entry = Entry(textvariable=self.clients)\n self.host_entry = Entry(textvariable=self.host)\n self.start_button = Button(text=\"Запуск сервера\", command=lambda: self.start())\n self.start_clients_button = Button(text=\"Запуск клиентов\", command=lambda: self.start_clients())\n self.stop_button = Button(text=\"Закрыть все\", command=lambda: self.stop())\n\n 
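# Tk's grid geometry manager lays the widgets out below as a small table:\n        # labels in column 0, entry fields in column 1, buttons in column 2.\n        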
self.host_label.grid(row=0, column=0)\n self.port_label.grid(row=1, column=0)\n self.host_entry.grid(row=0, column=1)\n self.port_entry.grid(row=1, column=1)\n self.clients_label.grid(row=2, column=0)\n self.clients_entry.grid(row=2, column=1)\n self.start_button.grid(row=0, column=2, padx=5, pady=5)\n self.start_clients_button.grid(row=1, column=2, padx=5, pady=5)\n self.stop_button.grid(row=2, column=2, rowspan=2, padx=5, pady=5)\n\n self.host_entry.insert(0, DEFAULT_IP_ADDRESS)\n self.port_entry.insert(0, DEFAULT_PORT)\n self.clients_entry.insert(0, 2)\n\n master.mainloop()\n\n def start_clients(self):\n for i in range(int(self.clients.get())):\n processes.append(\n subprocess.Popen(\n f'python3 client.py -a {self.host.get()} -p {self.port.get()} -n {clients[i]}',\n shell=True,\n encoding='utf8'\n )\n )\n\n def start(self):\n \"\"\" Стартуем подпроцессы \"\"\"\n processes.append(\n subprocess.Popen(\n f'python3 server.py -a {self.host.get()} -p {self.port.get()}',\n shell=True,\n encoding='utf8'\n )\n )\n\n def stop(self):\n \"\"\" Останавливаем подпроцессы \"\"\"\n while processes:\n victim = processes.pop()\n print('kill', victim.pid)\n victim.kill()\n # os.killpg(os.getpgid(victim.pid), signal.SIGHUP)\n # os.killpg(os.getpgid(victim.pid), signal.SIGTERM)\n\n\nif __name__ == '__main__':\n Launcher(Tk())","repo_name":"AlexeyDubrov/Client-Server_Chat","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"19496434367","text":"\"\"\"Console script for esm_master.\"\"\"\nimport argparse\nimport sys\n\n# import logging\n# logging.basicConfig(level=logging.DEBUG)\nfrom . import __version__\nfrom esm_motd import check_all_esm_packages\nfrom .esm_master import main_flow\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\n prog=\"esm_master\",\n description=\"tool for downloading, configuring and compiling.\",\n )\n parser.add_argument(\n \"target\",\n metavar=\"target\",\n nargs=\"?\",\n type=str,\n help=\"name of the target (leave empty for full list of targets)\",\n )\n parser.add_argument(\n \"--check\",\n \"-c\",\n action=\"store_true\",\n default=False,\n help=\"show what would be done, not doing anything\",\n )\n parser.add_argument(\n \"--verbose\", \"-v\", action=\"count\", default=False, help=\"toggle verbose mode\"\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n parser.add_argument(\n \"--modify-config\",\n \"-m\",\n dest=\"modify\",\n help=\"[m]odify configuration\",\n default=\"\", # kh 15.07.20 \"usermods.yaml\"\n )\n\n # kh 21.07.20\n parser.add_argument(\n \"--ignore-errors\",\n \"-i\",\n dest=\"ignore\",\n help=\"Ignore errors\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--keep-task-script\",\n \"-k\",\n dest=\"keep\",\n action=\"store_true\",\n default=False,\n help=\"Keep shell script generated to perform compilation/configuration jobs\",\n )\n parser.add_argument(\"--generate_tab_complete\", action=\"store_true\")\n parser.add_argument(\"--list_all_targets\", action=\"store_true\")\n\n parser.add_argument(\n \"--no-motd\",\n help=\"supress the printing of MOTD\",\n default=False,\n action=\"store_true\",\n )\n\n parsed_args = vars(parser.parse_args())\n\n target = \"\"\n check = False\n verbose = False\n modify_config_file = False\n no_motd = False\n\n if parsed_args:\n if \"target\" in parsed_args:\n target = 
parsed_args[\"target\"]\n if \"check\" in parsed_args:\n check = parsed_args[\"check\"]\n if \"verbose\" in parsed_args:\n verbose = parsed_args[\"verbose\"]\n if \"keep\" in parsed_args:\n keep = parsed_args[\"keep\"]\n if \"modify\" in parsed_args:\n modify_config_file = parsed_args[\"modify\"]\n if \"no_motd\" in parsed_args:\n no_motd = parsed_args[\"no_motd\"]\n\n if not target:\n target = \"\"\n\n if not no_motd:\n check_all_esm_packages()\n\n main_flow(parsed_args, target)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"esm-tools/esm_tools","sub_path":"src/esm_master/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"22"} +{"seq_id":"15561647804","text":"from drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\nfrom rest_framework.routers import DefaultRouter\nfrom django.contrib import admin\nfrom django.urls import path, include, re_path\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\nfrom djoser import views as djoser_views\nfrom rest_framework.routers import SimpleRouter\n\nfrom accounts.views import (\n PsyListCreateView,\n PsyRetrieveUpdateDestroyView,\n LieuConsultationListCreateView,\n LieuConsultationRetrieveUpdateDestroyView,\n CabinetListCreateView,\n CabinetRetrieveUpdateDestroyView,\n PatientListCreateView,\n PatientRetrieveUpdateDestroyView\n)\n\nfrom accounts.views import UserCreateView, TypeConsultationListView\n\nfrom zoomapp.views import CommunicationView\n\nfrom zoomapp.views import VideoCallListCreateView, VideoCallDetailView, GenerateVideoToken\n\nfrom zoomapp.views import AppelClientTwilioListCreateView, AppelClientTwilioDetailView\n\nfrom zoomapp.views import EmailConsultationView\n\nfrom consultcare.views import SpecialisationPsyListCreateView, SpecialisationPsyDetailView, \\\n ForfaitConsultationDetailView, ForfaitConsultationListCreateView, SouscriptionForfaitListCreateView, \\\n SouscriptionForfaitDetailView, ConsultationListCreateView, ConsultationDetailView, CommandeListCreateView, \\\n CommandeDetailView\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Snippets API\",\n default_version='v1',\n description=\"Test description\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,), )\n\nrouter = SimpleRouter()\nrouter.register(r'users', djoser_views.UserViewSet)\nurlpatterns = [\n # authentication\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),\n\n path('register/', UserCreateView.as_view(), name='register'),\n path('api-auth/', include('rest_framework.urls')),\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.jwt')),\n path('auth/', include('djoser.urls')),\n path('auth/', include('djoser.urls.authtoken')),\n path('auth/', include('djoser.urls.jwt')),\n 
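# Note: the djoser includes above appear more than once; Django resolves URLs\n    # top-down and stops at the first match, so the duplicate entries are redundant.\n    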
path('auth-google/', include('djoser.social.urls')),\n\n # appel audio and sms call\n # path('start-audio-call/', AudioCallView.as_view(), name='start-audio-call'),\n # path('dj-rest-auth/registration/', include('dj_rest_auth.registration.urls')),\n # path('dj-rest-auth/', include('dj_rest_auth.urls'))\n\n path('appels/', AppelClientTwilioListCreateView.as_view(), name='appel-list-create'),\n path('appels//', AppelClientTwilioDetailView.as_view(), name='appel-detail'),\n\n path('start-sms-com/', CommunicationView.as_view(), name='start-communication'),\n path('email_consultations/', EmailConsultationView.as_view(), name='email_consultations'),\n\n # Appel video suivi de génératio de token\n path('video-calls/', VideoCallListCreateView.as_view(), name='video-call-list-create'),\n path('video-calls//', VideoCallDetailView.as_view(), name='video-call-detail'),\n path('generate-token/', GenerateVideoToken.as_view(), name='generate-token'),\n\n # filters\n path('type-consultation/', TypeConsultationListView.as_view(), name='type-consultation-list'),\n\n # servvices connexee\n\n path('psy/', PsyListCreateView.as_view(), name='psy-list-create'),\n path('psy//', PsyRetrieveUpdateDestroyView.as_view(), name='psy-detail'),\n\n path('lieu/', LieuConsultationListCreateView.as_view(), name='lieu-list-create'),\n path('lieu//', LieuConsultationRetrieveUpdateDestroyView.as_view(), name='lieu-detail'),\n\n path('cabinet/', CabinetListCreateView.as_view(), name='cabinet-list-create'),\n path('cabinet//', CabinetRetrieveUpdateDestroyView.as_view(), name='cabinet-detail'),\n\n path('patient/', PatientListCreateView.as_view(), name='patient-list-create'),\n path('patient//', PatientRetrieveUpdateDestroyView.as_view(), name='patient-detail'),\n\n # Consultation et commande\n path('specialisations/', SpecialisationPsyListCreateView.as_view(), name='specialisation-list-create'),\n path('specialisations//', SpecialisationPsyDetailView.as_view(), name='specialisation-detail'),\n\n path('forfaits/', ForfaitConsultationListCreateView.as_view(), name='forfait-list-create'),\n path('forfaits//', ForfaitConsultationDetailView.as_view(), name='forfait-detail'),\n\n path('souscriptions/', SouscriptionForfaitListCreateView.as_view(), name='souscription-list-create'),\n path('souscriptions//', SouscriptionForfaitDetailView.as_view(), name='souscription-detail'),\n\n path('consultations/', ConsultationListCreateView.as_view(), name='consultation-list-create'),\n path('consultations//', ConsultationDetailView.as_view(), name='consultation-detail'),\n\n path('commandes/', CommandeListCreateView.as_view(), name='commande-list-create'),\n path('commandes//', CommandeDetailView.as_view(), name='commande-detail'),\n\n] + router.urls\n","repo_name":"Megnidro/psycode","sub_path":"psyapp/psyapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"4511706064","text":"#!/bin/python3\n\nimport sys\nN=int(sys.argv[1])\na=0 \nb=1 \ns=0\nfor i in range(N):\n print(a, end =\" \")\n s=a+b\n a=b\n b=s\n","repo_name":"shrististha/DevOps","sub_path":"Shell_Script/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"2269720274","text":"class parent:\n color = \"blue\"\n name = \"Hussein\"\n\n def __init__(self):\n self.len = 100\n self.address = 'Palestine , Gaza strip'\n\n 
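# color and name above are class attributes shared by every instance, while\n    # len and address are per-instance state set in __init__; the classmethod\n    # below reads the class attribute through cls rather than an instance.\n    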
@classmethod\n def get_color(cls):\n print(f'Parent color is : {cls.color}')\n\n def get_len(self):\n print(f'Parent len is : {self.len}')\n\n\nparent1 = parent()\nprint(parent1.len)\nparent1.get_len()\n\n\nclass child(parent):\n def __init__(self):\n super().__init__()\n self.city = 'Gaza'\n self.len = 180\n\n def Get_city(self):\n print(f'child len is : {self.city}')\n\n # def get_len(self):\n # print(f' child len is : {self.len}')\n\n\nchild1 = child()\nprint(child1.city)\nprint(child1.len)\nprint(child1.get_len())\n","repo_name":"HusseinAbuMariam/IT_Foundations","sub_path":"s15.py","file_name":"s15.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"8785446166","text":"import discord\nfrom config import roles\nfrom database import DataBase\nfrom achivements.achivementManager import AchivementManager\n\nmanager = AchivementManager()\ndb = DataBase()\n\n\nasync def setLevel(member, level: int):\n if level is not None and (level >= len(roles) or level < 0):\n raise ValueError(\"Too low/big level value\")\n\n\n db.cur.execute(\"UPDATE users SET level = ? WHERE id = ?\", (level, member.id))\n db.conn.commit()\n\n await manager.check(member, 3)\n\n newRole = discord.utils.get(member.guild.roles, id=roles[level])\n oldRoles = []\n\n for role in member.roles:\n if role.id in roles and role != newRole:\n oldRoles.append(role)\n\n if oldRoles: await member.remove_roles(*oldRoles)\n if newRole not in member.roles: await member.add_roles(newRole)\n\n\n","repo_name":"romkahot/Ieromonah","sub_path":"assets/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"27991233872","text":"# -*- coding:utf-8 -*-\n# author:lyrichu@foxmail.com\n# @Time: 2023/8/3 16:09\n\"\"\"\nmv 视频播放窗口\n\"\"\"\nimport os\n\nfrom PySide6.QtCore import Qt, QUrl, QTimer\nfrom PySide6.QtGui import QIcon\nfrom PySide6.QtMultimedia import QMediaPlayer, QAudioOutput\nfrom PySide6.QtMultimediaWidgets import QVideoWidget\nfrom PySide6.QtWidgets import QMainWindow, QSlider, QLabel, QWidget, QSizePolicy, QHBoxLayout, QVBoxLayout\n\nfrom q_thread.q_thread_tasks import *\nfrom util.utils import format_duration\nfrom widgets.custom_widgets import MyPushButton\n\nresource_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"resource\")\n\n\nclass MvPlayWindow(QMainWindow):\n def __init__(self, main_window, mv: Mv):\n super().__init__(main_window)\n self.main_window = main_window\n self.resize(800, 600)\n self.setWindowTitle(mv.name)\n self.mv = mv\n self.initUI()\n self.initMvPlayStatus()\n self.initSlotConnect()\n\n def initUI(self):\n # 视频播放组件\n self.video_widget = QVideoWidget()\n self.video_widget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n # 初始mv播放化进度条\n self.mv_play_slider = QSlider(Qt.Horizontal)\n self.mv_play_slider.setMinimum(0)\n self.mv_play_slider.setMaximum(100)\n self.mv_play_slider.setValue(0)\n\n # 播放按钮\n self.play_button = MyPushButton(os.path.join(resource_dir, 'icons/music_play_icon.png'))\n # 文本播放进度\n self.mv_play_progress_label = QLabel()\n # 音量条\n self.volume_button = MyPushButton(os.path.join(resource_dir, 'icons/volume_icon.png'))\n self.volume_slider = QSlider(Qt.Vertical)\n self.volume_slider.setRange(0, 100)\n self.volume_slider.setValue(50) # set initial volume to 50\n # 初始不可见,点击音量按钮可见\n self.volume_slider.setVisible(False)\n\n # 
Layout holding the bottom playback bar\n        self.play_status_bar = QWidget()\n        # Horizontally expandable / vertically fixed\n        self.play_status_bar.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n        # Horizontal layout\n        self.play_status_layout = QHBoxLayout(self.play_status_bar)\n        self.play_status_layout.addWidget(self.play_button)\n        self.play_status_layout.addWidget(self.mv_play_progress_label)\n        # The middle can stretch flexibly\n        self.play_status_layout.addStretch()\n        self.play_status_layout.addWidget(self.volume_button)\n        self.play_status_layout.addWidget(self.volume_slider)\n\n        # MV info at the very bottom\n        self.mv_info_label = QLabel()\n        # NOTE: the original rich-text tags were lost in extraction; the <h3>/<p>\n        # markup below is a minimal reconstruction around the surviving field texts.\n        self.mv_info_label.setText(f\"\"\"\n            <h3>{self.mv.name}</h3>\n            <p>Artist: {self.mv.artist}</p>\n            <p>Duration: {self.mv.songTimeMinutes}</p>\n            <p>Play count: {self.mv.mvPlayCnt}</p>
\n \"\"\")\n self.mv_info_label.setTextFormat(Qt.TextFormat.RichText)\n\n # 主布局\n self.main_layout = QVBoxLayout()\n self.main_widget = QWidget()\n self.main_widget.setLayout(self.main_layout)\n\n self.main_layout.addWidget(self.video_widget, 20)\n self.main_layout.addWidget(self.mv_play_slider, 1)\n self.main_layout.addWidget(self.play_status_bar, 2)\n self.main_layout.addWidget(self.mv_info_label, 2)\n\n self.setCentralWidget(self.main_widget)\n\n def initMvPlayStatus(self):\n \"\"\"\n 初始化MV播放相关\n :return:\n \"\"\"\n # Media player\n self.media_player = QMediaPlayer()\n # 音频输出\n self.audioOutput = QAudioOutput()\n self.media_player.setVideoOutput(self.video_widget)\n self.media_player.setAudioOutput(self.audioOutput)\n self.is_mv_url_ready = False\n\n def initSlotConnect(self):\n self.play_button.clicked.connect(self.on_play_button_clicked)\n # 定时器 使得 播放进度条可以每秒自动更新\n self.mv_play_slider_timer = QTimer()\n self.mv_play_slider_timer.timeout.connect(self.update_mv_play_slider)\n # Start timer\n self.mv_play_slider_timer.start(1000) # update every 1 second\n # mv的播放进度改变时\n self.media_player.positionChanged.connect(self.update_mv_play_position)\n # 音量滑块\n self.volume_button.clicked.connect(self.toggle_volume_slider)\n # 调整音量\n self.volume_slider.valueChanged.connect(self.set_volume)\n # 用户滑动进度条可以快进/后退 到指定位置\n self.mv_play_slider.sliderMoved.connect(self.mv_play_slider_changed)\n\n def on_play_button_clicked(self):\n \"\"\"\n 当点击mv播放按钮时\n :return:\n \"\"\"\n if self.media_player.isPlaying():\n self.media_player.pause()\n self.play_button.setIcon(QIcon(os.path.join(resource_dir, 'icons/music_pause_icon.png')))\n else:\n self.media_player.play()\n self.play_button.setIcon(QIcon(os.path.join(resource_dir, 'icons/music_play_icon.png')))\n\n def update_mv_play_position(self, position):\n \"\"\"\n 更新mv播放到指定位置\n :param position:\n :return:\n \"\"\"\n # position 是当前播放位置,单位是毫秒,转换成秒需要除以1000\n current_position = position // 1000\n # 将秒数转换成 MM:ss 的格式\n formatted_position = format_duration(current_position)\n total_time = self.mv.songTimeMinutes\n self.mv_play_progress_label.setText(f\"{formatted_position}/{total_time}\")\n\n def update_mv_play_slider(self):\n \"\"\"\n 更新mv播放进度条\n :return:\n \"\"\"\n if self.media_player.isPlaying():\n self.mv_play_slider.setValue(self.media_player.position())\n\n def toggle_volume_slider(self):\n \"\"\"\n 设置音量可见\n :return:\n \"\"\"\n self.volume_slider.setVisible(not self.volume_slider.isVisible())\n\n def set_volume(self, volume=50):\n \"\"\"\n 设置音量为给定的值\n :param volume: 音量数值\n :return:\n \"\"\"\n self.audioOutput.setVolume(volume)\n\n def mv_play_slider_changed(self, value):\n \"\"\"\n 监听mv播放滑块的移动\n :param value:\n :return:\n \"\"\"\n self.media_player.setPosition(value)\n\n def set_mv_source(self, url):\n self.is_mv_url_ready = True\n self.media_player.setSource(QUrl(url))\n self.media_player.play()\n # 更新进度条的大小\n self.mv_play_slider.setMaximum(self.mv.duration * 1000)\n self.mv_play_slider.setValue(0)\n # 关闭音乐的播放\n self.main_window.main_window.main_window.main_window.core_music_player.pause_music()\n\n def load_mv_url_async(self):\n # 避免重复加载\n if self.is_mv_url_ready:\n return\n # 异步加载 mv url\n mv_url_worker = MvUrlWorker(self.mv.id)\n mv_url_worker.mv_url.connect(self.set_mv_source)\n mv_url_worker.finished.connect(lambda: mv_url_worker.deleteLater())\n mv_url_worker.start()\n\n def closeEvent(self, event):\n \"\"\"\n 主窗口被关闭时会调用\n :param event:\n :return:\n \"\"\"\n # 在这里添加你的清理代码\n # 关闭mv的播放\n self.media_player.pause()\n # 窗口关闭时,恢复音乐的播放\n core_music_player = 
self.main_window.main_window.main_window.main_window.core_music_player\n if not core_music_player.main_window.player.isPlaying():\n core_music_player.play_music()\n","repo_name":"Lyrichu/hh_music","sub_path":"window/mv_play_window.py","file_name":"mv_play_window.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"22"} +{"seq_id":"69947975738","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pydicom\nimport pylab\nimport pandas as pd\nfrom glob import glob\nimport os.path as op\n\nclass_info_path = '../input/stage_1_detailed_class_info.csv'\ntrain_labels_path = '../input/stage_1_train_labels.csv'\nimages_dir = '../input/stage_1_train_images/'\n\n# data frames\nclass_info_df = pd.read_csv(class_info_path)\ntrain_labels_df = pd.read_csv(train_labels_path)\nimages_df = pd.DataFrame({'path': glob(op.join(images_dir, '*.dcm'))})\nimages_df['patientId'] = images_df['path'].map(lambda x: op.splitext(op.basename(x))[0])\n# parse DICOM header into dataframe\nDICOM_TAGS = ['PatientAge', 'ViewPosition', 'PatientSex']\ndef get_tags(image_path):\n tag_data = pydicom.read_file(image_path, stop_before_pixels = True)\n tag_dict = {tag: getattr(tag_data, tag, '') for tag in DICOM_TAGS}\n tag_dict['path'] = image_path\n return pd.Series(tag_dict)\nmeta_df = images_df.apply(lambda x: get_tags(x['path']), 1)\nmeta_df['PatientAge'] = meta_df['PatientAge'].map(int)\nmeta_df.drop('path', 1).describe(exclude=np.number)\n\n# concatenate the data frames\ninfo_df = pd.concat([class_info_df, train_labels_df.drop('patientId', 1)], 1)\nimage_with_meta_df = pd.merge(images_df, meta_df, on='path')\nbbox_with_info_df = pd.merge(info_df, image_with_meta_df, on='patientId', how='left')\n\nbbox_with_info_df.sample(3)\ndef parse_patient_data(df):\n \"\"\"\n Parse pandas dataframe into the following dictionary:\n data = {\n patientID: {\n 'dicom': path/to/dicom/file,\n 'target': 0 if normal, 1 if pneumonia,\n 'boxes': list of box(es), each box is an array of number [x y width height],\n 'class': one of the three values 'Lung Opacity', 'No Lung Opacity / Not Norma', 'Normal',\n 'age': age of the patient,\n 'view': either 'AP' - anteriorposterior, or 'PA' - posterioranterior,\n 'sex': either 'Male' or 'Female'\n },\n ...\n }\n \"\"\"\n \n extract_box = lambda row: [row['x'], row['y'], row['width'], row['height']]\n \n data = {}\n for n, row in df.iterrows():\n pid = row['patientId']\n if pid not in data:\n data[pid] = {\n 'dicom': '%s/%s.dcm' % (images_dir, pid),\n 'target': row['Target'],\n 'class': row['class'],\n 'age': row['PatientAge'],\n 'view': row['ViewPosition'],\n 'sex': row['PatientSex'],\n 'boxes': []}\n \n if data[pid]['target'] == 1:\n data[pid]['boxes'].append(extract_box(row))\n return data\n\npatients_data = parse_patient_data(bbox_with_info_df)\npatient_ids = list(patients_data.keys())\nprint(patients_data[np.random.choice(patient_ids)])\ndef draw_single_patient(data, mean_image = None):\n \"\"\"\n Draw a single patient with overalying bounding boxes of opacities (if present)\n \"\"\"\n d = pydicom.read_file(data['dicom'])\n im = d.pixel_array\n \n im = np.stack([im] * 3, axis = 2)\n if (mean_image is np.ndarray):\n im = np.subtract(im, mean_image)\n \n for box in data['boxes']:\n im = overlay_box(im = im, box = box, stroke = 6)\n pylab.imshow(im, cmap = pylab.cm.gist_gray)\n\ndef single_patient_info(patient_data, pid):\n patient = patients_data[pid]\n return ('id: %s\\ntarget: %s\\nclass: %s\\nage: 
%s\\nview: %s\\nsex: %s' % (pid, patient['target'], patient['class'], patient['age'], patient['view'], patient['sex']))\n\ndef overlay_box(im, box, color = [256, 200, 200], stroke = 1):\n \"\"\"\n Overlay a single box on the image\n \"\"\"\n \n box = [int(d) for d in box]\n [x1, y1, width, height] = box\n x2 = x1 + width\n y2 = y1 + height\n \n im[y1:y1 + stroke, x1:x2] = color\n im[y2:y2 + stroke, x1:x2] = color\n im[y1:y2, x1:x1 + stroke] = color\n im[y1:y2, x2:x2 + stroke] = color\n \n return im\n\nplt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\n\nsample_pid = '00436515-870c-4b36-a041-de91049b9ab4'\nplt.title(single_patient_info(patients_data, sample_pid))\ndraw_single_patient(patients_data[sample_pid])\ndef mean_image(patients_data, pids):\n mean_im = np.zeros((1024, 1024), dtype='uint8')\n \n for pid in pids:\n d = pydicom.read_file(patients_data[pid]['dicom'])\n im = d.pixel_array\n mean_im = np.add(mean_im, im)\n mean_im = np.round(np.multiply(np.divide(mean_im, np.amax(mean_im)), 255)).astype(int)\n mean_im = np.stack([mean_im] * 3, axis = 2)\n return mean_im\n\nglobal_mean = mean_image(patients_data, patient_ids)\npylab.imshow(global_mean, cmap = pylab.cm.gist_gray)\nap_patient_ids = bbox_with_info_df.loc[bbox_with_info_df['ViewPosition'] == 'AP', 'patientId'].values\npa_patient_ids = bbox_with_info_df.loc[bbox_with_info_df['ViewPosition'] == 'PA', 'patientId'].values\n\nfig, ax = plt.subplots()\nplt.bar(np.arange(1, 3), list(map(lambda x: x.shape[0], [ap_patient_ids, pa_patient_ids])))\nax.set_ylabel('# patients')\nax.set_xticks(np.arange(1,3))\nax.set_xticklabels(['AP', 'PA'])\n\nmean_ap_img = mean_image(patients_data, ap_patient_ids)\nmean_pa_img = mean_image(patients_data, pa_patient_ids)\n\nplt.figure(num=None, figsize=(16, 8), dpi=80, facecolor='w', edgecolor='k')\nplt.subplot(221)\nsample_ap_pid = np.random.choice(ap_patient_ids)\nplt.title(single_patient_info(patients_data, sample_ap_pid))\ndraw_single_patient(patients_data[sample_ap_pid])\nplt.subplot(223)\ndraw_single_patient(patients_data[sample_ap_pid], mean_ap_img)\n\nplt.subplot(222)\nsample_pa_pid = np.random.choice(pa_patient_ids)\nplt.title(single_patient_info(patients_data, sample_pa_pid))\ndraw_single_patient(patients_data[sample_pa_pid])\nplt.subplot(224)\ndraw_single_patient(patients_data[sample_pa_pid], mean_pa_img)","repo_name":"aorursy/new-nb-5","sub_path":"ngohgia_simple-data-analysis-and-benchmark-wip.py","file_name":"ngohgia_simple-data-analysis-and-benchmark-wip.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"54677346","text":"import time\nimport configparser\n\n'''\n@author: dujianxiao\n'''\nclass InitConfig():\n \n '''\n @初始化config.ini\n @param path:配置文件路径 \n '''\n def initConfig(self,path):\n try:\n email=[]\n fileData=[]\n config=configparser.ConfigParser()\n config.read(path+'/conf.ini', encoding=\"utf-8-sig\")\n '''\n @预置3个数据库\n '''\n DB1=config.get(\"section\",\"DB1\")\n DB2=config.get(\"section\",\"DB2\")\n DB3=config.get(\"section\",\"DB3\")\n fileData.append(DB1)\n fileData.append(DB2)\n fileData.append(DB3)\n ''' \n @邮件相关\n '''\n sendEmail=config.get(\"section\",\"sendEmail\")\n stmphost=config.get(\"section\",\"stmphost\")\n pwd=config.get(\"section\",\"pwd\")\n receive=config.get(\"section\",\"receive\")\n emailTitle=config.get(\"section\",\"emailTitle\")\n emailContent=config.get(\"section\",\"emailContent\")\n email.append(sendEmail)\n 
email.append(stmphost)\n email.append(pwd)\n email.append(receive)\n email.append(emailTitle)\n email.append(emailContent)\n '''\n @用户变量\n '''\n try:\n '''\n @读取conf.ini中的用户自定义变量\n '''\n userParams=[]\n userParamsValue=[]\n userParamFile=open(path+'/conf.ini',encoding='utf-8')\n for line in userParamFile.readlines():\n line=line.strip('\\n')\n if 0=9:\n if line[0]!='#' and line[0:9]!='[section]':\n userParams.append(line[0:line.find('=')])\n userParamsValue.append(line[line.find('=')+1:])\n except Exception as e:\n print(e)\n return fileData,email,userParams,userParamsValue\n except Exception as e:\n self.console.append(\" \")\n self.console.append(\"\"+'初始化conf.ini失败:'+\" \")\n self.console.append(\"\"+str(e)+\" \")\n self.console.append(\" \")\n print(e)\n ","repo_name":"liqi629/py_study","sub_path":"shimo-master/common/init/InitConfig.py","file_name":"InitConfig.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32798995644","text":"class Joe(object):\n\n greeting = 'Hello, Joe'\n\n # whenever a function is defined in the python class\n # the current instance is passed to it by default\n # notice the self key word in the function which\n # is not sent through tbe function, but throws an\n # error when not removed from the definition inside the class\n # Because of this automatic passing of the instance instance methods are called \"bound\" methods.\n\n def call_me_from_the_class(self):\n print('calling the function')\n\n\njoe_obj = Joe()\nprint(joe_obj.greeting)\n\n# whenever instance method is called from the object's members the instance is\n# passed implicitly.\n\njoe_obj.call_me_from_the_class()\n\n\n","repo_name":"ravisharmaa/python101","sub_path":"class/lessson303.py","file_name":"lessson303.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"30250699372","text":"from bs4 import BeautifulSoup\nimport json\nhtml_content =\"Copy it here \"\n\n# Create a BeautifulSoup object\nsoup = BeautifulSoup(html_content, 'html.parser')\n\n# Find all image tags\nimg_tags = soup.find_all('img')\n\n# Extract the 'src' attribute from each image tag\nimg_urls = [img['src'] for img in img_tags if 'src' in img.attrs]\n\n# Create a dictionary with the image URLs\ndata = {'image_urls': img_urls}\n\n# Save the data to a JSON file\nwith open('image_urls.json', 'w') as json_file:\n json.dump(data, json_file, indent=2)\n\nprint(\"Image URLs have been saved to image_urls.json\")\n","repo_name":"KhaledKammoun/Python-Scripts","sub_path":"Custom-Functions/get_url_img_from_html/imgGetter.py","file_name":"imgGetter.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"34791470573","text":"import os\nimport requests\n\ndef get_address_from_location(lat, long):\n API_KEY = os.getenv('GEOCODER_API_KEY')\n URL = f'https://api.opencagedata.com/geocode/v1/json?q={lat}+{long}&key={API_KEY}'\n res = requests.get(URL)\n data = res.json()\n locations = data['results'][0]['components']\n address = f'{locations[\"county\"]}, {locations[\"state_district\"]}, {locations[\"state\"]}, {locations[\"country\"]}'\n return 
address","repo_name":"amlannandy/EcoSearch-Old","sub_path":"server/helpers/geocoder.py","file_name":"geocoder.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"32009259884","text":"import logging\nfrom typing import Optional, Dict\nfrom configparser import ConfigParser\nfrom pathlib import Path\n\nfrom threedi_settings.mappings import (\n physical_settings_map,\n time_step_settings_map,\n numerical_settings_map,\n aggregation_settings_map,\n)\nfrom threedi_settings.models import SimulationConfig\nfrom threedi_settings.threedimodel_config import ThreedimodelIni\n\nlogger = logging.getLogger(__name__)\n\n\nclass SimulationConfigWriter:\n \"\"\"\n Writes a 'legacy' ini from a `SimulationConfig` object.\n A `SimulationConfig` instance usually holds simulation settings\n data retrieved from the 3Di API V3.\n \"\"\"\n\n def __init__(\n self,\n simulation_config: SimulationConfig,\n ini_file_path: Path,\n aggregation_file_path: Path,\n legacy_ini_file_path: Optional[Path] = None,\n ):\n self.simulation_config = simulation_config\n assert (self.simulation_config is not None,\n \"simulation_config must not be 'None'\")\n self.aggr_config = ConfigParser()\n self.ini_output_file = ini_file_path\n self.aggregation_file_path = aggregation_file_path\n self.legacy_conf = None\n if legacy_ini_file_path:\n legacy_ini = ThreedimodelIni(legacy_ini_file_path)\n self.config = legacy_ini.config\n else:\n self.config = ConfigParser()\n\n def to_ini(self):\n \"\"\"\n produces a 'legacy style' ini file. Fields that exist in the\n legacy file but do not in the API, are populated with defaults defined\n in the mapping module.\n \"\"\"\n if self.simulation_config.aggregation_config:\n self._add_aggregations()\n with self.aggregation_file_path.open(\"w\") as aggregation_file:\n self.aggr_config.write(aggregation_file)\n else:\n logger.debug(\n \"No aggregation settings defined for simulation %s \",\n self.simulation_config.sim_uid,\n )\n\n self._add(physical_settings_map, self.simulation_config.physical_config)\n self._add(\n time_step_settings_map, self.simulation_config.time_step_config\n )\n self._add(\n numerical_settings_map, self.simulation_config.numerical_config\n )\n self._update_aggregation_path()\n with self.ini_output_file.open(\"w\") as configfile:\n self.config.write(configfile)\n\n def _add(self, settings_map: Dict, sub_setting):\n \"\"\"\n converts settings values from API type to 'legacy' type and adds them\n to the config instance under their 'legacy' section.\n \"\"\"\n for attr_name, mapping in settings_map.items():\n value = getattr(sub_setting, attr_name)\n legacy_field_info, api_field_info, _ = mapping\n if legacy_field_info.ini_section not in self.config:\n self.config[legacy_field_info.ini_section] = {}\n if legacy_field_info.type != api_field_info:\n value = legacy_field_info.type(value)\n self.config[legacy_field_info.ini_section][\n legacy_field_info.name\n ] = f\"{value}\"\n\n def _add_aggregations(self):\n \"\"\"\n converts aggregation settings values from API type to 'legacy' type\n and adds them to the aggr_config instance\n \"\"\"\n for i, entry in enumerate(\n self.simulation_config.aggregation_config, start=1\n ):\n for attr_name, mapping in aggregation_settings_map.items():\n value = getattr(entry, attr_name)\n legacy_field_info, _, _ = mapping\n if str(i) not in self.aggr_config:\n self.aggr_config[str(i)] = {}\n self.aggr_config[str(i)][legacy_field_info.name] = 
f\"{value}\"\n\n def _update_aggregation_path(self):\n if (\n not self.aggregation_file_path.exists()\n or not self.aggregation_file_path.is_file()\n ):\n return\n self.config[\"output\"][\n \"aggregation_settings\"\n ] = self.aggregation_file_path.resolve().as_posix()\n","repo_name":"nens/threedi-settings","sub_path":"threedi_settings/plain/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":4138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"22"} +{"seq_id":"24778184806","text":"# This file is part of pydidas.\n#\n# pydidas is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Pydidas is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Pydidas. If not, see .\n\n\"\"\"\nModule with the set_default_plugin_dir function which checks whether a plugin\ndirectory has been set and if not, default to the generic one.\n\"\"\"\n\n__author__ = \"Malte Storm\"\n__copyright__ = \"Copyright 2021-2022, Malte Storm, Helmholtz-Zentrum Hereon\"\n__license__ = \"GPL-3.0\"\n__maintainer__ = \"Malte Storm\"\n__status__ = \"Development\"\n__all__ = [\"set_default_plugin_dir\"]\n\nimport os\n\nfrom ..pydidas_q_settings import PydidasQsettings\n\n\nDEFAULT_PATH = __file__\nfor _ in range(4):\n DEFAULT_PATH = os.path.dirname(DEFAULT_PATH)\nDEFAULT_PATH = os.path.join(DEFAULT_PATH, \"pydidas_plugins\")\n\n\ndef set_default_plugin_dir():\n \"\"\"\n St the default plugin directory if no plugin directory has been defined in the\n QSettings at the time of the function call.\n \"\"\"\n _settings = PydidasQsettings()\n _val = _settings.value(\"user/plugin_path\")\n if _val in [None, \"\"]:\n _settings.set_value(\"user/plugin_path\", DEFAULT_PATH)\n","repo_name":"hereon-GEMS/pydidas","sub_path":"pydidas/core/utils/set_default_plugin_dir_.py","file_name":"set_default_plugin_dir_.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"22"} +{"seq_id":"18682886912","text":"# -*- coding: utf-8 -*-\n\"\"\"\n---------------------------------------------------------------------------------------------------------------\nTitle: Transform an XML document to a pandas dataframe to generate Jira reports more quickly\n\nRemark : The main objective is to generate reports containing tables that contain jira data in a Word or Excel\nformat. 
These tables are easily defined in XML documents (for more information : Readme.md).\n\nWritten by: Jean Guiraud\n---------------------------------------------------------------------------------------------------------------\n\"\"\"\n\n# !/usr/bin/env python3\n# Python version : 3.8\n\nimport os\nimport sys\nimport xml.etree.ElementTree as ET\nimport pandas as pd\nfrom jira.client import JIRA\nfrom tqdm import tqdm\n\nimport docx\nfrom docx.oxml.shared import qn # Feel free to move these out\nfrom docx.oxml.xmlchemy import OxmlElement\nfrom docx.enum.table import WD_ALIGN_VERTICAL\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\n\n\nclass JiraReports:\n \"\"\"\n Transform XML Document from Jira server to a pandas dataframe\n\n :param jira: jira instance\n :param xml: link to the XML document\n \"\"\"\n\n def __init__(self, jira: JIRA, xml: str):\n\n file = ET.parse(xml) # To parse the content in a variable\n self.__root = file.getroot() # To go to the beginning of the XML document\n self.file = []\n\n for table in tqdm(self.__root.findall(\"Table\")):\n\n tab = {}\n for incremental, columns in enumerate(table.findall(\"Column\")):\n tab[incremental] = [columns.get(\"type\"), columns.text]\n\n if table.get(\"style\") == \"Classic\":\n jira_issues = jira.search_issues(table.find(\"JQL\").text, maxResults=False)\n self.file.append(pd.DataFrame(jira_import(jira_issues, tab)))\n\n elif table.get(\"style\") == \"MultipleJQL\":\n\n pandas_tables = []\n\n for filters in table.findall(\"Filters\"):\n for nbJQL in filters.findall(\"JQL\"):\n jira_issues = jira.search_issues(nbJQL.text, maxResults=False)\n pandas_tables.append(pd.DataFrame(jira_import(jira_issues, tab)))\n\n self.file.append(pandas_tables)\n\n elif table.get(\"style\") == \"LinkOneTicket\":\n jira_issues = jira.search_issues(table.find(\"JQL\").text, maxResults=1)\n\n jira_issues = jira.search_issues(\n 'issue in linkedIssues(' + jira_issues[0].key + ', ' +\n table.find(\"JQL\").get(\"link\") + ')', maxResults=False)\n\n self.file.append(pd.DataFrame(jira_import(jira_issues, tab)))\n\n def to_excel(self, document_name: str = \"jira_excel\"):\n \"\"\"\n Generate Excel document\n\n :param document_name: the document name *(default : jira_excel)*\n / you can add the path, like : users/john/test\n \"\"\"\n\n writer = pd.ExcelWriter(document_name + \".xlsx\", engine=\"xlsxwriter\")\n workbook = writer.book # Creating an excel document\n\n # Formatting of the excel document\n header = workbook.add_format({'bold': True, 'align': 'center', 'valign': 'vcenter', 'bg_color': '#D8E4BC'})\n Bold = workbook.add_format({'bold': True, 'align': 'center'})\n # To give the title of the document according to the XML file\n titre = workbook.add_format({'align': 'center', 'bold': True})\n\n worksheet_name = []\n\n for incremental, all_tables in enumerate(self.__root.findall('Table')):\n\n Name = all_tables.get(\"name\")\n tag = [\"/\", \"*\", \":\", \"[\", \"]\"]\n\n for tags in tag: # For prohibited characters\n Name = Name.replace(tags, ' ')\n\n Name = str(incremental + 1) + \" - \" + Name\n\n if len(Name) > 31: # For character length\n Name = Name[:31]\n\n worksheet_name.append(Name)\n\n if all_tables.get('style') == \"Classic\" or all_tables.get('style') == \"LinkOneTicket\":\n self.file[incremental].to_excel(writer, sheet_name=Name, startrow=3, header=False, index=False)\n\n elif all_tables.get('style') == \"MultipleJQL\":\n start_r = 3\n for inc_JQL, nbJQL in enumerate(all_tables[0].findall(\"JQL\")):\n 
self.file[incremental][inc_JQL].to_excel(writer, sheet_name=Name, startrow=start_r + 1,\n header=False, index=False)\n\n writer.sheets[Name].merge_range(start_r, 0, start_r, len(all_tables.findall('Column')) - 1,\n 'Merged Range')\n writer.sheets[Name].write(start_r, 0, nbJQL.get(\"name\"), Bold)\n\n start_r += len(self.file[incremental][inc_JQL].index)\n\n # To put the name on the Excel sheet\n writer.sheets[Name].write(0, 0, all_tables.get('name'), titre)\n\n for incremental, column in enumerate(all_tables.findall('Column')): # Creation of all the columns\n writer.sheets[Name].write(2, incremental, column.get('name'), header)\n\n writer.sheets[Name].set_row(2, 30)\n\n writer.close()\n\n def to_word(self, document_name: str = \"jira_word\", landscape: bool = False, cell_color: str = \"#85B1ED\"):\n \"\"\"\n Generate Word document from Jira_XMLDocument instance\n\n :param document_name: the document name (default : jira_word)\n :param landscape: document in landscape format (True or False)\n :param cell_color:\n \"\"\"\n\n document = docx.Document()\n document.add_heading(self.__root.get('name'), level=0)\n\n if landscape:\n new_width, new_height = document.sections[-1].page_height, document.sections[-1].page_width\n document.sections[-1].page_width = new_width\n document.sections[-1].page_height = new_height\n\n for incremental, all_tables in enumerate(self.__root.findall('Table')):\n\n document.add_heading(all_tables.get('name'), level=1)\n table = document.add_table(rows=0, cols=len(all_tables.findall('Column')))\n p = table.add_row().cells\n\n for num_cell, column in enumerate(all_tables.findall('Column')):\n table.cell(0, num_cell).text = column.get('name')\n table.cell(0, num_cell).vertical_alignment = WD_ALIGN_VERTICAL.CENTER\n table.rows[0].cells[num_cell].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n __set_cell_background__(table.rows[0].cells[num_cell], cell_color)\n\n __make_rows_bold__(table.rows[0])\n __set_repeat_table_header__(table.rows[0])\n table.style = 'Table Grid'\n table.add_row()\n\n if all_tables.get(\"style\") == \"Classic\" or all_tables.get('style') == \"LinkOneTicket\":\n\n # add the rest of the data frame\n for i in range(self.file[incremental].shape[0]):\n if i != 0:\n table.add_row()\n for j in range(self.file[incremental].shape[-1]):\n table.cell(i + 1, j).text = str(self.file[incremental].values[i, j])\n table.cell(i + 1, j).vertical_alignment = WD_ALIGN_VERTICAL.CENTER\n\n elif all_tables.get(\"style\") == \"MultipleJQL\":\n row = 1\n\n for inc_JQL, nbJQL in enumerate(all_tables[0].findall(\"JQL\")):\n\n if inc_JQL != 0:\n table.add_row()\n\n table.cell(row, 0).text = nbJQL.get(\"name\")\n __make_rows_bold__(table.rows[row])\n\n for column in range(self.file[incremental][inc_JQL].shape[-1]):\n table.cell(row, 0).merge(table.cell(row, column))\n\n table.cell(row, 0).paragraphs[\n 0].alignment = docx.enum.text.WD_ALIGN_PARAGRAPH.CENTER\n\n row += 1\n\n # add the rest of the data frame\n for i in range(self.file[incremental][inc_JQL].shape[0]):\n table.add_row()\n for j in range(self.file[incremental][inc_JQL].shape[-1]):\n table.cell(row, j).text = str(\n self.file[incremental][inc_JQL].values[i, j])\n table.cell(i + 1, j).vertical_alignment = WD_ALIGN_VERTICAL.CENTER\n row += 1\n # save the doc\n document.save(document_name + '.docx')\n\n def to_word_template(self, path_template_word: str, document_name: str = \"jira_word_template\"):\n \"\"\"\n Generate Word template document from Jira_XMLDocument instance\n\n :param path_template_word: path of the 
word template *(see Readme.md)*\n :param document_name: the document name *(default : jira_word_template)*\n / you can add the path, like : users/john/test\n \"\"\"\n\n # open an existing document\n document = docx.Document(path_template_word)\n tables = document.tables\n\n for incremental, all_tables in enumerate(self.__root.findall('Table')):\n\n tables_length = None\n\n for search in range(len(tables)):\n try:\n if all_tables.get(\"keyword\") == tables[search].cell(1, 0).text:\n tables_length = search\n except:\n continue\n\n search += 1\n\n if tables_length is not None:\n\n if all_tables.get(\"style\") == \"Classic\" or all_tables.get('style') == \"LinkOneTicket\":\n # add the rest of the data frame\n for i in range(self.file[incremental].shape[0]):\n if i != 0:\n tables[0].add_row()\n for j in range(self.file[incremental].shape[-1]):\n tables[tables_length].cell(i + 1, j).text = str(self.file[incremental].values[i, j])\n\n elif all_tables.get(\"style\") == \"MultipleJQL\":\n\n row = 1\n\n for inc_JQL, nbJQL in enumerate(all_tables[0].findall(\"JQL\")):\n\n if row != 1:\n tables[tables_length].add_row()\n\n tables[tables_length].cell(row, 0).text = nbJQL.get(\"name\")\n __make_rows_bold__(tables[tables_length].rows[row])\n\n for column in range(self.file[incremental][inc_JQL].shape[-1]):\n tables[tables_length].cell(row, 0).merge(tables[tables_length].cell(row, column))\n\n tables[tables_length].cell(row, 0).paragraphs[\n 0].alignment = docx.enum.text.WD_ALIGN_PARAGRAPH.CENTER\n\n row += 1\n\n # add the rest of the data frame\n for i in range(self.file[incremental][inc_JQL].shape[0]):\n tables[tables_length].add_row()\n for j in range(self.file[incremental][inc_JQL].shape[-1]):\n tables[tables_length].cell(row, j).text = str(\n self.file[incremental][inc_JQL].values[i, j])\n row += 1\n # save the doc\n document.save(document_name + '.docx')\n\n def __str__(self) -> str:\n return \"Jira_XMLDocument : \" + self.__root.attrib[\"name\"]\n\n\ndef jira_import(jira_issues: dict, information: dict) -> list:\n \"\"\"\n Extract data from jira to a classic list\n\n :param jira_issues; jira ResultList[] *(dictionary)*\n :param information: dictionary that contains 1 key and 2 values:\n - Key = index of the column (1, 2, 3, etc...)\n - Value 1 = data to extract (example : summary, customfield, description, etc...)\n - Value 2 = type of data (example : multiplevalue, link, etc...)\n example : {0: ['', 'summary'], 1: ['', 'summary'], 2: ['', 'description']}\n \"\"\"\n\n import_array = []\n\n if len(jira_issues) != 0:\n\n for incremental, all_issues in enumerate(jira_issues): # Browse all issues in the JSON file\n import_array.append({})\n\n for inc, table in information.items():\n\n # Condition for the multiple values in the JSON variable\n if table[0] == \"multiple_values\": # To check that it is called in the XML file\n\n Fulltext = \"\" # Variable that adds the names of the multiple values\n for multiple_values in eval(\"all_issues.fields.\" + table[1]):\n if Fulltext is None:\n Fulltext = str(multiple_values.name)\n continue\n Fulltext += \"\\n\" + str(multiple_values.name)\n\n import_array[incremental][table[0]] = Fulltext\n\n elif table[0] == \"link\":\n\n Fulltext = []\n\n for link in all_issues.fields.issuelinks:\n if link.type.inward == table[1]:\n if hasattr(link, \"inwardIssue\"):\n cut_string = str(link.inwardIssue.fields.summary).split(\n \"_\") # To get the name of the issue\n\n Fulltext.append(str(cut_string[0]) + \" Iss. 
\" + str(cut_string[1]))\n\n elif link.type.outward == table[1]:\n if hasattr(link, \"outwardIssue\"):\n cut_string = str(link.outwardIssue.fields.summary).split(\n \"_\") # To get the name of the issue\n\n Fulltext.append(str(cut_string[0]) + \" Iss. \" + str(cut_string[1]))\n\n Text = \"\"\n for Fulltext in Fulltext:\n if Text is None:\n Text = Fulltext\n continue\n Text += \"\\n\" + Fulltext\n\n import_array[incremental][table[1]] = Text\n\n elif table[0] == \"specific_summary\":\n cut_string = str(all_issues.fields.summary).split(\"_\") # Split variable\n import_array[incremental][table[1]] = cut_string[2]\n\n # For all other fields\n else:\n value = table[1]\n if value in import_array[incremental]:\n value += str(inc)\n import_array[incremental][value] = str(eval(\"all_issues.fields.\" + table[1]))\n\n return import_array\n\n\ndef __make_rows_bold__(*rows):\n \"\"\"\n Set a row in bold\n\n :param *rows: python-docx row\n \"\"\"\n\n for row in rows: # Select all rows\n for cell in row.cells: # Select all cells\n for paragraph in cell.paragraphs: # Select all cell's paragraph\n for run in paragraph.runs: # Select all paragraph's run\n run.font.bold = True # Set in bold the run\n\n\ndef __set_repeat_table_header__(row):\n \"\"\"\n Set repeat table row on every new page\n\n :param row: python-docx row\n \"\"\"\n tr = row._tr\n trPr = tr.get_or_add_trPr()\n tblHeader = OxmlElement('w:tblHeader')\n tblHeader.set(qn('w:val'), \"true\")\n trPr.append(tblHeader)\n return row\n\n\ndef __set_cell_background__(cell, fill):\n \"\"\"\n Set color background in a cellular\n\n :param cell: xlsxwriter cell\n :param fill: specifies the color to be used for the background\n \"\"\"\n\n cell_properties = cell._element.tcPr\n try:\n cell_shading = cell_properties.xpath('w:shd')[0] # In case there's already shading\n except IndexError:\n cell_shading = OxmlElement('w:shd') # Add new w:shd element to it\n if fill:\n cell_shading.set(qn('w:fill'), fill) # Set fill property, respecting namespace\n cell_properties.append(cell_shading) # Finally extend cell props with shading element\n","repo_name":"JeanGrd/JiraReports","sub_path":"JiraReports.py","file_name":"JiraReports.py","file_ext":"py","file_size_in_byte":16111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"73755426616","text":"from PyQt5 import QtWidgets, uic\nimport sys\nfrom PyQt5.QtWidgets import QPushButton, QRadioButton, QLineEdit, QVBoxLayout, QLabel\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__() # Call the inherited classes __init__ method\n uic.loadUi(\"TradingUI.ui\", self) # Load the .ui file\n # =================================================\n self.quantity = self.findChild(QLineEdit, \"quantity\")\n self.n_tp = self.findChild(QLineEdit, \"n_tp\")\n self.start = self.findChild(QLineEdit, \"start\")\n self.end = self.findChild(QLineEdit, \"end\")\n self.side = \"Sell\"\n self.distribution = \"uniform\"\n self.symbol = \"XBTUSD\"\n\n # self.error = self.findChild(QLabel, 'currentPrice').text()\n\n self.error = self.findChild(QLabel, \"currentPrice\")\n # =================================================\n self.submitButton = self.findChild(QPushButton, \"submit\")\n self.submitButton.clicked.connect(self.submitOrder)\n\n # SIDE\n layoutSide = self.findChild(QVBoxLayout, \"side\")\n # SELL\n self.sellRadio = self.findChild(QRadioButton, \"Sell\")\n self.sellRadio.setChecked(True)\n self.sellRadio.toggled.connect(lambda: 
self.__setSide(self.sellRadio))\n # BUY\n self.buyRadio = self.findChild(QRadioButton, \"Buy\")\n self.buyRadio.toggled.connect(lambda: self.__setSide(self.buyRadio))\n\n # DISTRIBUTION\n layoutDistribution = self.findChild(QVBoxLayout, \"distribution\")\n # UNIFORM\n self.uniformRadio = self.findChild(QRadioButton, \"uniform\")\n self.uniformRadio.setChecked(True)\n self.uniformRadio.toggled.connect(\n lambda: self.__setDistribution(self.uniformRadio)\n )\n # NORMAL\n self.normalRadio = self.findChild(QRadioButton, \"normal\")\n self.normalRadio.toggled.connect(\n lambda: self.__setDistribution(self.normalRadio)\n )\n # POSITIVE\n self.positiveRadio = self.findChild(QRadioButton, \"positive\")\n self.positiveRadio.toggled.connect(\n lambda: self.__setDistribution(self.positiveRadio)\n )\n # NEGATIVE\n self.negativeRadio = self.findChild(QRadioButton, \"negative\")\n self.negativeRadio.toggled.connect(\n lambda: self.__setDistribution(self.negativeRadio)\n )\n # ADDING TO LAYOUT\n layoutSide.addWidget(self.sellRadio)\n layoutSide.addWidget(self.buyRadio)\n layoutDistribution.addWidget(self.uniformRadio)\n layoutDistribution.addWidget(self.normalRadio)\n layoutDistribution.addWidget(self.positiveRadio)\n layoutDistribution.addWidget(self.negativeRadio)\n\n self.show() # Show the GUI\n\n def submitOrder(self):\n print(f\"pressed button {self.start.text()} {self.distribution}\")\n\n def __setSide(self, radio):\n if radio.text() == \"Sell\" and radio.isChecked():\n self.side = radio.text()\n elif radio.text() == \"Buy\" and radio.isChecked():\n self.side = radio.text()\n\n def __setDistribution(self, radio):\n if radio.text() == \"uniform\" and radio.isChecked():\n self.distribution = radio.text()\n elif radio.text() == \"normal\" and radio.isChecked():\n self.distribution = radio.text()\n elif radio.text() == \"positive\" and radio.isChecked():\n self.distribution = radio.text()\n elif radio.text() == \"negative\" and radio.isChecked():\n self.distribution = radio.text()\n\n\ndef main():\n app = QtWidgets.QApplication(sys.argv)\n window = Ui()\n app.exec_()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Effanuel/BitMEX-trading-tool","sub_path":"market_maker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"22"} +{"seq_id":"16960278331","text":"import random\n\nwords = ['SHINRINYOKU', 'KOMOREBI', 'KUIDAORE', 'TSUNDOKU', 'WABI-SABI', 'KINTSUGI', 'MONO NO AWARE', 'IRUSU', 'KAROSHI', 'SHOGANAI', 'NATSUKASHII']\n\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef check_win(string1, string2):\n if string1 == string2:\n return True\n else:\n return False\n\ndef win_message():\n print('=D')\n print('Congratulations! You have completed the word puzzle. You are now free.')\n replay()\n\ndef loss_message(mystery_word):\n print('\\n')\n print('The correct word was ', mystery_word)\n print('\\n')\n print('=O')\n print('You have used all your guesses and have been hanged!')\n replay()\n \ndef replay():\n revive = input('Replay? 
(y/n) ')\n if revive == 'y':\n hangman()\n if revive == 'n':\n print('Thank you for playing')\n else:\n replay()\n\ndef print_build(count):\n \n build = [[''],\n [' ____', '| |', '|', '|', '|', '|', '|_______'],\n [' ____', '| |', '| O', '|', '|', '|', '|_______'],\n [' ____', '| |', '| O', '| |', '|', '|', '|_______'],\n [' ____', '| |', '| O', '| -|', '|', '|', '|_______'],\n [' ____', '| |', '| O', '| -|-', '|', '|', '|_______'],\n [' ____', '| |', '| O', '| -|-', '| /', '|', '|_______'],\n [' ____', '| |', '| O', '| -|-', '| /\\\\', '|', '|_______']]\n\n for part in build[count]:\n print(part)\n\ndef welcome_message():\n print('\\n')\n print('WELCOME TO JAPANESE HANGMAN!!!')\n print('WHERE YOU ARE A CRIMINAL ABOUT TO BE HANGED')\n print('SOLVE THE PUZZLE TO BE FREED')\n\ndef hangman():\n\n welcome_message()\n\n mystery_word = words[random.randint(0, 10)]\n dashes = ''\n\n for i in range(len(mystery_word)):\n if mystery_word[i] in alphabet:\n dashes += '_'\n else:\n dashes += mystery_word[i]\n\n letters_guessed = []\n failed_guess_count = 0\n\n while failed_guess_count < 7:\n \n print('\\n')\n print_build(failed_guess_count)\n print('\\n')\n print(dashes)\n print('\\n')\n\n if check_win(dashes, mystery_word):\n win_message()\n \n print(f'You have ', 7 - failed_guess_count, ' guesses remaining')\n print('Letters guessed: ', letters_guessed)\n\n guess = input('Guess a letter: ').upper()\n\n if guess == mystery_word:\n win_message()\n\n elif len(guess) > 1 or guess not in alphabet:\n\n guess = input('GUESS A LETTER: ').upper()\n print('Thank you! =D')\n print('\\n')\n\n elif guess in mystery_word:\n for i in range(len(mystery_word)):\n if mystery_word[i] == guess:\n letters = list(dashes)\n letters[i] = mystery_word[i]\n dashes = ''.join(letters)\n\n else: \n if guess in letters_guessed:\n print('\\n')\n print('You have guessed this letter already. 
Try another letter.')\n else:\n letters_guessed.append(guess)\n print('Your guess was incorrect.')\n print('\\n')\n failed_guess_count += 1\n\n if failed_guess_count == 7:\n loss_message(mystery_word)\n\nhangman()\n","repo_name":"richardleung1/hman_challenge","sub_path":"h_ngm_n.py","file_name":"h_ngm_n.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"22"} +{"seq_id":"71231358137","text":"#!/usr/bin/env python\r\n\r\n#\r\n# MagicaVoxel2MinecraftPi\r\n#\r\n\r\nfrom voxel_util import create_voxel, post_to_chat, ply_to_positions\r\nfrom magicavoxel_axis import axis\r\nfrom all_clear import clear\r\nfrom time import sleep\r\n\r\n# polygon file format exported from MagicaVoxel\r\nply_file = 'piyo.ply'\r\n\r\n# Origin to create (Minecraft)\r\nx0 = 0\r\ny0 = 5\r\nz0 = 0\r\n\r\n# Rotation degree (MagicaVoxel)\r\nalpha = 0 # x-axis\r\nbeta = 0 # y-axis\r\ngamma = 0 # z-axis\r\n\r\nmodel_settings = {\r\n 'x0': x0,\r\n 'y0': y0,\r\n 'z0': z0,\r\n 'alpha': alpha,\r\n 'beta': beta,\r\n 'gamma': gamma,\r\n}\r\n\r\nclear()\r\npost_to_chat('create polygon file format model')\r\nbox_positions = ply_to_positions(ply_file)\r\ncreate_voxel(box_positions, model_settings)\r\n","repo_name":"creativival/magicavoxel2mcpi","sub_path":"magicavoxel2mcpi.py","file_name":"magicavoxel2mcpi.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"22"} +{"seq_id":"72251793043","text":"import csv\nimport math\nimport io\nimport numpy as np\nfrom PIL import Image as Img\n\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, mixins, status\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom core.models import Label, Csvfile, Dataset, Image\n\nfrom dataset import serializers\n\n\nclass BaseDatasetAttrViewSet(viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.CreateModelMixin):\n \"\"\"Base viewset for user owned dataset attributes\"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n \"\"\"Return objects for the current authenticated user only\"\"\"\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(dataset__isnull=False)\n\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()\n\n def perform_create(self, serializer):\n \"\"\"Create a new object\"\"\"\n serializer.save(user=self.request.user)\n\n\nclass CsvfileViewSet(BaseDatasetAttrViewSet):\n \"\"\"Manage csvfiles in the database\"\"\"\n queryset = Csvfile.objects.all()\n serializer_class = serializers.CsvfileSerializer\n\n def get_serializer_class(self):\n \"\"\"Return appropriate serializer class\"\"\"\n if self.action == 'upload_csvfile':\n return serializers.CsvfileFileSerializer\n\n return self.serializer_class\n\n @action(methods=['POST'], detail=True, url_path='upload-csvfile')\n def upload_csvfile(self, request, pk=None):\n \"\"\"Upload a csvfile to populate a dataset\"\"\"\n csvfilefile = self.get_object()\n serializer = self.get_serializer(\n csvfilefile,\n data=request.data\n )\n if serializer.is_valid():\n serializer.save()\n csvid = csvfilefile.id\n csvfile = Csvfile.objects.filter(id=csvid)\n user = csvfile[0].user\n file = 
csvfilefile.file\n labelcol = csvfilefile.labelcol\n start = csvfilefile.imgcolstart\n end = csvfilefile.imgcolend + 1\n size = int(math.sqrt(end-start))\n if size**2 != (end-start):\n raise ValueError('size is not valid!')\n csvf = file.open(mode='r')\n reader = csv.reader(csvf, delimiter=',')\n next(reader, None) # skip the headers\n for i, row in enumerate(reader):\n label = Label.objects.filter(name=row[labelcol])\n if label is None:\n raise ValueError('label is not valid!')\n img = np.array(row[start:end]).reshape(size, size)\n image = Img.fromarray(img.astype(np.uint8), 'L')\n img_array = img.astype(float)/255\n params = {\n 'name': f'{csvid}_{i}',\n 'csvfile': csvfile[0],\n 'row': i,\n 'label': label[0]\n }\n newimg, _ = Image.objects.get_or_create(user=user, **params)\n fimg = io.BytesIO()\n image.save(fimg, 'bmp')\n newimg.image.save(\"image.bmp\", fimg, save=True)\n fimg.close()\n fnp = io.BytesIO()\n np.save(fnp, img_array)\n newimg.img_array.save(\"image.npy\", fnp, save=True)\n fimg.close()\n newimg.save()\n return Response(\n serializer.data,\n status=status.HTTP_200_OK\n )\n file.close()\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n\nclass DatasetViewSet(BaseDatasetAttrViewSet):\n \"\"\"Manage datasets in the database\"\"\"\n queryset = Dataset.objects.all()\n serializer_class = serializers.DatasetSerializer\n\n def get_serializer_class(self):\n \"\"\"Return appropriate serializer class\"\"\"\n if self.action == 'retrieve':\n return serializers.DatasetDetailSerializer\n\n return self.serializer_class\n\n\nclass ImageViewSet(viewsets.GenericViewSet, mixins.ListModelMixin):\n \"\"\"Manage images in the database\"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n queryset = Image.objects.all()\n serializer_class = serializers.ImageSerializer\n\n def get_queryset(self):\n \"\"\"Return objects for the current authenticated user only\"\"\"\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(dataset__isnull=False)\n\n return queryset.filter(\n user=self.request.user\n ).order_by('-name').distinct()\n\n def get_serializer_class(self):\n \"\"\"Return appropriate serializer class\"\"\"\n if self.action == 'retrieve':\n return serializers.ImageDetailSerializer\n\n return self.serializer_class\n","repo_name":"ixime/mnist_classification_api","sub_path":"app/dataset/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11986737407","text":"import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\nfrom kt_utils import load_dataset\r\nimport torch.utils.data as Data\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nclass cnn(nn.Module):\r\n def __init__(self):\r\n super(cnn,self).__init__()\r\n self.conv1 = nn.Sequential(\r\n nn.ZeroPad2d(3),\r\n nn.Conv2d(in_channels=3,out_channels=8,kernel_size=(7,7),stride=1,padding=0),\r\n nn.BatchNorm2d(8),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.ZeroPad2d(1),\r\n nn.Conv2d(in_channels=8,out_channels=16,kernel_size=(3,3),stride=1,padding=0),\r\n nn.BatchNorm2d(16),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)\r\n )\r\n self.conv3 = nn.Sequential(\r\n nn.ZeroPad2d(1),\r\n 
nn.Conv2d(in_channels=16,out_channels=32,kernel_size=(3,3),stride=1,padding=0),\r\n nn.BatchNorm2d(32),\r\n nn.ReLU(),\r\n nn.MaxPool2d(2,2)\r\n )\r\n self.out = nn.Linear(8*8*32,1)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x) #(8,32,32)\r\n x = self.conv2(x) #(16,16,16)\r\n x = self.conv3(x) #(32,8,8)\r\n x = x.view(x.size(0),-1) #(64,2048)\r\n x = self.out(x) #(64,1)\r\n out = F.sigmoid(x)\r\n return out\r\nmodel = cnn()\r\nmodel.load_state_dict(torch.load('model_happy.pkl'))\r\nX_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()\r\ntest_x_plot = X_test_orig.copy()\r\ntest_x = X_test_orig/255\r\ntest_y = Y_test_orig.T\r\ntest_x = test_x.reshape((test_x.shape[0],test_x.shape[3],test_x.shape[1],test_x.shape[2]))\r\ntest_x = torch.from_numpy(test_x).float()\r\ntest_y = torch.from_numpy(test_y).float()\r\ntest_out = model(test_x)\r\ntest_prediction = (test_out.data.cpu().numpy() >= 0.5)\r\na = {1:'happy',0:'unhappy'}\r\n\r\nindex = np.random.randint(0,120)\r\nplt.imshow(test_x_plot[index])\r\nplt.title(a[(test_prediction)[index][0]])\r\nplt.show()\r\n","repo_name":"drzhangweihao/deeping-learning4-2","sub_path":"happy_model.py","file_name":"happy_model.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20621664386","text":"from django.shortcuts import render,redirect\r\nfrom django.http import HttpResponse\r\nfrom django.contrib.auth.forms import AuthenticationForm,UserCreationForm\r\nfrom django.contrib.auth import authenticate,login,logout\r\n\r\nfrom employee.models import User\r\nfrom hrd.models import Employee,TypesMast,StatusMast,GradesMast,DesigMast,ComapnyMast,AcademicsMast,BankDetails,Document\r\nfrom employee.forms import SignUpForm,EmployeeEditForm\r\nfrom hrd.forms import PersonalInfo,OfficeInfo,AcademicsInfo,BankInfo,Documents\r\n\r\nfrom rolepermissions.roles import assign_role\r\nfrom rolepermissions.checkers import has_role\r\nfrom rolepermissions.roles import get_user_roles\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.models import Group\r\nimport datetime\r\nimport os\r\nfrom django.conf import settings\r\n\r\nclass Employees:\r\n\r\n\t@login_required\r\n\tdef index(request):\t\t\r\n\t\t# if has_role(request.user,'is_employee'):\r\n\t\t# \treturn HttpResponse('role')\r\n\t\tuser = User.objects.all()\r\n\t\treturn render(request, \"employees/index.html\",{'user':user})\r\n\r\n\t@login_required\t\r\n\tdef EmployeeCreate(request):\t\r\n\t\tform = SignUpForm()\r\n\t\treturn render(request, \"registration.html\",{'form':form})\r\n\r\n\r\n\tdef saveEmployee(request):\t\r\n\t\tform = SignUpForm(request.POST)\r\n\t\tif request.POST:\r\n\t\t\tif form.is_valid():\r\n\r\n\t\t\t\tuser1 = form.save()\r\n\t\t\t\tassign_role(user1, 'is_employee')\r\n\t\t\t\temp = Employee(user_id=user1.id)\r\n\t\t\t\temp.save()\r\n\t\t\t\t# form.refresh_from_db()\r\n\t\t\t\t# raw_password = form.cleaned_data.get('password1')\r\n\t\t\t\t# user = authenticate(username=user1.username, password=raw_password)\r\n\t\t\t\t # user = User.objects.get(id=1)\r\n\t\t\t\r\n\t\t\t\t# login(request, user)\r\n\t\t\t\treturn redirect('/hrd/employees')\r\n\t\t\t\t# if request.user.is_authenticated:\r\n\t\t\t\t\t\r\n\t\t\t\t\t# role = get_user_roles(user)\r\n\t\t\t\t\t# return HttpResponse(role)\r\n\t\t\t\t\t# return HttpResponse(request.user.username)\r\n\t\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t \t return render(request, 
\"registration.html\",{'form':form})\r\n\t\telse:\r\n\t\t\treturn render(request, \"registration.html\",{'form':form}) \r\n\r\n\t@login_required\t\t\r\n\tdef deleteEmployee(request,id):\r\n\t\tuser = User.objects.get(pk=id)\r\n\t\tgroup = Group.objects.get(name='is_employee') \r\n\t\tuser.groups.remove(group)\r\n\t\tuser.delete();\r\n\t\treturn redirect('/hrd/employees')\r\n\r\n\t@login_required\r\n\tdef editEmployee(request,id=''):\r\n\t\tif request.method=='POST':\r\n\t\t\tdata = User.objects.get(id=id)\r\n\t\t\tform = EmployeeEditForm(request.POST,instance = data)\t\t\r\n\t\t\tif form.is_valid():\r\n\t\t\t\tform.save();\r\n\t\t\t\treturn redirect('/hrd/employees')\r\n\t\telse:\r\n\t\t\tdata = User.objects.get(id=id)\r\n\t\t\tform = EmployeeEditForm(instance = data)\r\n\t\t\treturn render(request, \"employees/edit.html\",{'form':form,'id':id})\r\n\r\n\t@login_required\r\n\tdef showTabs(request,page,id):\r\n\t\temployee = Employee.objects.get(user_id=id)\t\t\r\n\t\tif page == 'main':\r\n\t\t\tmain = render(request, \"employees/empinfo.html\",{'id':id})\r\n\r\n\t\tif page == 'personal':\r\n\t\t\tif request.method == 'POST':\r\n\t\t\t\tform = PersonalInfo(request.POST,instance=employee)\r\n\t\t\t\tif form.is_valid():\r\n\t\t\t\t\tform.save()\r\n\t\t\t\t\treturn HttpResponse(status=200)\r\n\t\t\t\telse:\r\n\t\t\t\t\tmain = render(request, \"employees/forms/personal.html\",{'form':form},status=201)\t\r\n\t\t\telse:\r\n\t\t\t\tform = PersonalInfo(instance=employee)\r\n\t\t\t\tmain = render(request, \"employees/forms/personal.html\",{'form':form})\t\r\n\r\n\t\tif page == 'office':\r\n\t\t\tif request.method =='POST':\r\n\t\t\t\tform = OfficeInfo(request.POST,instance=employee)\r\n\t\t\t\tif form.is_valid():\r\n\t\t\t\t\tform.save()\r\n\t\t\t\t\treturn HttpResponse(status=200)\r\n\t\t\t\telse:\r\n\t\t\t\t\tmain = render(request, \"employees/forms/office.html\",{'form':form},status=201)\r\n\t\t\telse:\r\n\t\t\t\tform = OfficeInfo(instance=employee)\r\n\t\t\t\tmain = render(request, \"employees/forms/office.html\",{'form':form})\t\r\n\r\n\t\tif page == 'academics':\r\n\t\t\tfile = request.FILES.get('document')\r\n\t\t\tobj = AcademicsMast()\t\r\n\t\t\tif request.method=='POST':\t\t\t\r\n\t\t\t\tform =AcademicsInfo(request.POST)\r\n\t\t\t\tif form.is_valid():\r\n\r\n\t\t\t\t\tAcademicsMast.objects.create(\r\n\t\t\t\t\t\tdoman_of_study= request.POST.get('doman_of_study'),\r\n\t\t\t\t\t\tname_of_board= request.POST.get('name_of_board'),\r\n\t\t\t\t\t\tcomplete_in= request.POST.get('complete_in'),\r\n\t\t\t\t\t\tgared= request.POST.get('gared'),\r\n\t\t\t\t\t\tdocument= file.name,\r\n\t\t\t\t\t\tnote= request.POST.get('gared'),\r\n\t\t\t\t\t\temp_id= request.POST.get('emp_id'),\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttoday_folder = datetime.datetime.now().strftime(\"%B%d_%Y\")+'/document/'\r\n\t\t\t\t\tpath_to_img = os.path.join(settings.MEDIA_ROOT, today_folder)\r\n\t\t\t\t\tif not os.path.exists(path_to_img):\r\n\t\t\t\t\t\tos.mkdir(path_to_img)\r\n\t\t\t\t\timg_path = os.path.join(path_to_img, file.name)\r\n\r\n\t\t\t\t\twith open(img_path, 'wb+') as destination:\r\n\t\t\t\t\t\tif file.multiple_chunks: # size is over than 2.5 Mb\r\n\t\t\t\t\t\t\tfor chunk in file.chunks():\r\n\t\t\t\t\t\t\t\tdestination.write(chunk)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdestination.write(file.read())\r\n\t\t\t\t\tdata = AcademicsMast.objects.filter(emp_id=id)\r\n\t\t\t\t\treturn render(request, \"employees/tableRefresh/academicTable.html\",{'data':data,'id':id})\r\n\t\t\t\telse:\r\n\t\t\t\t\tdata = 
AcademicsMast.objects.filter(emp_id=id)\r\n\t\t\t\t\tmain = render(request, \"employees/forms/academics.html\",{'form':form,'id':id,'data':data},status=201)\r\n\t\t\telse:\t\r\n\t\t\t\tform = AcademicsInfo()\r\n\t\t\t\tdata = AcademicsMast.objects.filter(emp_id=id)\r\n\t\t\t\tmain = render(request, \"employees/forms/academics.html\",{'form':form,'id':id,'data':data})\r\n\t\tif page == 'bankinfo':\r\n\t\t\tif request.method=='POST':\r\n\t\t\t\tform = BankInfo(request.POST,request.FILES)\r\n\t\t\t\tfile = request.FILES.get('document')\r\n\t\t\t\tif form.is_valid():\r\n\t\t\t\t\tBankDetails.objects.create(\r\n\t\t\t\t\t\taccou_hol_name=request.POST.get('accou_hol_name'),\r\n\t\t\t\t\t\taccou_num =request.POST.get('accou_num'),\r\n\t\t\t\t\t\tbank_name =request.POST.get('bank_name'),\r\n\t\t\t\t\t\tifsc_code =request.POST.get('ifsc_code'),\r\n\t\t\t\t\t\tbranch =request.POST.get('branch'),\r\n\t\t\t\t\t\tdocument =file.name,\r\n\t\t\t\t\t\tnote =request.POST.get('note'),\r\n\t\t\t\t\t\temp_id = request.POST.get('emp_id')\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttoday_folder = datetime.datetime.now().strftime(\"%B%d_%Y\")+'/bankinfo/'\r\n\t\t\t\t\tpath_to_img = os.path.join(settings.MEDIA_ROOT, today_folder)\r\n\t\t\t\t\tif not os.path.exists(path_to_img):\r\n\t\t\t\t\t\tos.mkdir(path_to_img)\r\n\t\t\t\t\timg_path = os.path.join(path_to_img, file.name)\r\n\r\n\t\t\t\t\twith open(img_path, 'wb+') as destination:\r\n\t\t\t\t\t\tif file.multiple_chunks: # size is over than 2.5 Mb\r\n\t\t\t\t\t\t\tfor chunk in file.chunks():\r\n\t\t\t\t\t\t\t\tdestination.write(chunk)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdestination.write(file.read())\r\n\t\t\t\t\tdata = BankDetails.objects.filter(emp_id=id)\r\n\t\t\t\t\treturn render(request, \"employees/tableRefresh/bankinfoRefresh.html\",{'data':data,'id':id},status=200)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tform = BankInfo(request.POST)\r\n\t\t\t\t\tdata = BankDetails.objects.filter(emp_id=id)\r\n\t\t\t\t\tmain = render(request, \"employees/forms/bankinfo.html\",{'form':form,'id':id,'data':data},status=201)\t\r\n\t\t\telse:\r\n\t\t\t\tform = BankInfo()\r\n\t\t\t\tdata = BankDetails.objects.filter(emp_id=id)\r\n\t\t\t\tmain = render(request, \"employees/forms/bankinfo.html\",{'form':form,'id':id,'data':data})\r\n\r\n\t\tif page == 'document':\r\n\t\t\tif request.method == 'POST':\r\n\t\t\t\tform = Documents(request.POST,request.FILES)\r\n\t\t\t\tfile = request.FILES.get('files')\r\n\t\t\t\tif form.is_valid():\r\n\t\t\t\t\tDocument.objects.create(\r\n\t\t\t\t\t\tdocument_title=request.POST.get('document_title'),\r\n\t\t\t\t\t\tdocument_status =request.POST.get('document_status'),\r\n\t\t\t\t\t\tnote =request.POST.get('note'),\r\n\t\t\t\t\t\tfile =file.name,\r\n\t\t\t\t\t\temp_id = request.POST.get('emp_id')\r\n\t\t\t\t\t\t)\r\n\r\n\t\t\t\t\ttoday_folder = datetime.datetime.now().strftime(\"%B%d_%Y\")+'/document/'\r\n\t\t\t\t\tpath_to_img = os.path.join(settings.MEDIA_ROOT, today_folder)\r\n\t\t\t\t\tif not os.path.exists(path_to_img):\r\n\t\t\t\t\t\tos.mkdir(path_to_img)\r\n\t\t\t\t\timg_path = os.path.join(path_to_img, file.name)\r\n\r\n\t\t\t\t\twith open(img_path, 'wb+') as destination:\r\n\t\t\t\t\t\tif file.multiple_chunks: # size is over than 2.5 Mb\r\n\t\t\t\t\t\t\tfor chunk in file.chunks():\r\n\t\t\t\t\t\t\t\tdestination.write(chunk)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tdestination.write(file.read())\r\n\t\t\t\t\tdata = Document.objects.filter(emp_id=id)\r\n\t\t\t\t\treturn render(request, 
\"employees/tableRefresh/documentRefresh.html.html\",{'data':data,'id':id},status=200)\r\n\t\t\t\telse:\r\n\t\t\t\t\tdata = Document.objects.filter(emp_id=id)\t\t\t\t\t\r\n\t\t\t\t\tmain = render(request, \"employees/forms/document.html\",{'form':form,'data':data,'id':id},status=201)\r\n\t\t\telse:\r\n\t\t\t\tform = Documents()\r\n\t\t\t\tdata = Document.objects.filter(emp_id=id)\r\n\t\t\t\tmain = render(request, \"employees/forms/document.html\",{'form':form,'data':data,'id':id})\t\t\r\n\t\t\t\t\t\t\t\t\t\r\n\t\treturn main\r\n\r\n","repo_name":"mukeshkushwahlaxyo/Django_hrms","sub_path":"hrd/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23889196621","text":"import altair as alt\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\n\nst.set_page_config(layout=\"wide\")\n\nfrom urllib.error import URLError\n\n@st.cache\ndef get_ranking_data(dataset):\n df = pd.read_csv(f\"data/outputs/{dataset}_sample_rankings.csv\")\n return df\n\ntry:\n dataset_id = 'sfn_2015'\n df = get_ranking_data(dataset_id)\n print(\"done\")\n #model_names = df.model_name.unique()\n model_names = ['neuromatch', 'all-mpnet-base-v2', 'allenai-specter', 't4yt_trained', 'aggregate']\n\n probes = df.probe_id.unique()\n\n document_id = st.selectbox(\n \"Choose document\", df['probe_title'].unique(), 78\n )\n\n if not document_id:\n st.error(\"Please select a document\")\n else:\n document_id = df.loc[df.probe_title == document_id].iloc[0].probe_id\n data = df.loc[df.probe_id == document_id]\n st.write(\"#### \" + data.iloc[0].probe_title)\n st.write(data.iloc[0].probe_abstract)\n\n st.write(\"##### Most similar documents\")\n\n cols = st.columns(len(model_names))\n for i, col in enumerate(cols):\n col.write(\"###### \" + model_names[i][:23])\n row = data.loc[data.model_name == model_names[i]].iloc[0]\n col.write(f\"Scores (1/5/10): {row['score_1']:.1f}, {row['score_5']:.1f}, {row['score_10']:.1f}\")\n for j in range(10):\n title = row[f\"title_{j}\"]\n if title is None:\n title = \"NA\"\n if isinstance(title, float) and np.isnan(title):\n title = \"NA\"\n expander = col.expander(title)\n expander.write(row[f\"abstract_{j}\"])\n\nexcept URLError as e:\n st.error(\n \"\"\"\n **This demo requires internet access.**\n\n Connection error: %s\n \"\"\"\n % e.reason\n )\n","repo_name":"patrickmineault/neuromatch-nlp","sub_path":"display_results.py","file_name":"display_results.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"39526314574","text":"import argparse\nimport sys\nfrom typing import TextIO, Generator, TypeAlias, Iterable, Union\nfrom itertools import chain\n\n\nPacket: TypeAlias = list[Union[int, \"Packet\"]]\n\n\ndef main(packets_file: TextIO, part: int):\n if part == 1:\n packet_pairs = read_packets_part_1(packets_file)\n packets_in_right_order = find_packets_in_right_order(packet_pairs)\n print(sum(packets_in_right_order))\n elif part == 2:\n packet_pairs = read_packets_part_2(packets_file)\n packet_pairs.append([[2]])\n packet_pairs.append([[6]])\n sort_packets(packet_pairs)\n divider_packets_indices = find_divider_packets(packet_pairs)\n assert len(divider_packets_indices) == 2\n print(divider_packets_indices[0] * divider_packets_indices[1])\n else:\n raise ValueError(f\"Invalid part: {part}\")\n\n\ndef read_packets_part_1(packets_file: TextIO) -> 
Generator[tuple[Packet, Packet], None, None]:\n left = right = None\n for line in packets_file.readlines():\n if line == \"\\n\":\n left = right = None\n continue\n if left is None:\n left = parse_packet(line.strip())\n elif right is None:\n right = parse_packet(line.strip())\n yield left, right\n else:\n raise ValueError(\"Invalid input\")\n if left is not None and right is not None:\n yield left, right\n\n\ndef read_packets_part_2(packets_file: TextIO) -> list[Packet]:\n packets = []\n for line in packets_file.readlines():\n if line == \"\\n\":\n continue\n packets.append(parse_packet(line.strip()))\n return packets\n\n\ndef parse_packet(packet: str) -> Packet:\n return eval(packet)\n\n\ndef find_packets_in_right_order(packet_pairs: Iterable[tuple[Packet, Packet]]) -> list[int]:\n ordered_indices = []\n for i, (left, right) in enumerate(packet_pairs, start=1):\n if is_packet_in_right_order(left, right):\n ordered_indices.append(i)\n return ordered_indices\n\n\ndef sort_packets(packet_pairs: list[Packet]):\n # Bubble sort.\n while True:\n swapped = False\n for i in range(len(packet_pairs) - 1):\n if not is_packet_in_right_order(packet_pairs[i], packet_pairs[i + 1]):\n packet_pairs[i], packet_pairs[i + 1] = packet_pairs[i + 1], packet_pairs[i]\n swapped = True\n if not swapped:\n break\n\n\ndef is_packet_in_right_order(left: Packet, right: Packet) -> bool | None:\n if isinstance(left, int) and isinstance(right, int):\n if left != right:\n return left < right\n return None\n if isinstance(left, int) and isinstance(right, list):\n return is_packet_in_right_order([left], right)\n if isinstance(left, list) and isinstance(right, int):\n return is_packet_in_right_order(left, [right])\n for l, r in zip(left, right):\n is_ordered = is_packet_in_right_order(l, r)\n if is_ordered is not None:\n return is_ordered\n if len(left) != len(right):\n return len(left) < len(right)\n\n\ndef find_divider_packets(packets: list[Packet]) -> list[int]:\n divider_packets = []\n for i, packet in enumerate(packets, start=1):\n if is_divider_packet(packet):\n divider_packets.append(i)\n return divider_packets\n\n\ndef is_divider_packet(packet: Packet) -> bool:\n if isinstance(packet, int):\n return False\n return packet == [[2]] or packet == [[6]]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # File argument or stdin\n parser.add_argument(\"file\", nargs=\"?\",\n type=argparse.FileType(\"r\"), default=sys.stdin)\n parser.add_argument(\"--part\", type=int, default=1)\n\n args = parser.parse_args()\n main(args.file, args.part)\n","repo_name":"gio8tisu/AoC2022","sub_path":"day13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3412335671","text":"import numpy as np\nimport copy\nclass Space:\n def __init__(self, basis: list, bins = (32, 32, 32)):\n self.bins = bins\n self.intervals = basis\n self.coordination = 'Catesian'\n self.dimension = len(basis)\n self.basis = []\n self.diff = []\n if self.dimension >=1:\n self.b1 = np.linspace(*basis[0], num = bins[0])\n self.b1_diff = (basis[0][1] - basis[0][0])/bins[0]\n self.basis.append(self.b1)\n self.diff.append(self.b1_diff)\n if self.dimension >=2:\n self.b2 = np.linspace(*basis[1], num = bins[1])\n self.b2_diff = (basis[1][1] - basis[1][0])/bins[1]\n self.basis.append(self.b2)\n self.diff.append(self.b2_diff)\n if self.dimension >=3:\n self.b3 = np.linspace(*basis[2], num = bins[2])\n self.b3_diff = (basis[2][1] - 
basis[2][0])/bins[2]\n self.basis.append(self.b3)\n self.diff.append(self.b3_diff)\n basis_expand = [[j for j in range(self.dimension) if j!=i] for i in range(self.dimension)]\n for i in range(self.dimension):\n for j in basis_expand[i]:\n self.basis[i] = np.expand_dims(self.basis[i], axis = j)\n def to_numpy(self,):\n basis_expand = [[self.bins[j] if j!=i else 1 for j in range(self.dimension) ] for i in range(self.dimension)]\n basis = copy.copy(self.basis)\n for i in range(self.dimension):\n basis[i] = np.tile(basis[i], tuple(basis_expand[i]) )\n basis[i] = np.expand_dims(basis[i], axis=-1)\n basis = tuple(basis)\n return np.concatenate(basis, axis=-1)\n \n \n ","repo_name":"ja2006203966/PDE","sub_path":"src/Space.py","file_name":"Space.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72093733521","text":"\"\"\"Handler for task deletion.\"\"\"\n\nfrom pathlib import Path\n\nfrom pybotx import Bot, BubbleMarkup, HandlerCollector, IncomingMessage\n\nfrom app.bot import constants\nfrom app.bot.answers.status import get_status_message\nfrom app.bot.middlewares.db_session import db_session_middleware\nfrom app.interactors.delete_task import DeleteTaskInteractor\nfrom app.services.file_storage import FileStorage\n\ncollector = HandlerCollector()\nfile_storage = FileStorage(Path(constants.FILE_STORAGE_PATH))\n\n\n@collector.command(\n \"/delete-task\",\n visible=False,\n middlewares=[db_session_middleware],\n)\nasync def delete_task(message: IncomingMessage, bot: Bot) -> None:\n assert message.source_sync_id\n\n interactor = DeleteTaskInteractor(\n db_session=message.state.db_session, file_storage=file_storage\n )\n await interactor.execute(message.data[\"task_id\"])\n\n await bot.edit_message(\n bot_id=message.bot.id,\n sync_id=message.source_sync_id,\n body=\"**Задача успешно удалена.**\",\n bubbles=BubbleMarkup(),\n )\n\n await bot.send(message=get_status_message(message))\n","repo_name":"ExpressApp/todo-bot","sub_path":"app/bot/commands/tasks/delete_task.py","file_name":"delete_task.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"3510854071","text":"## Makes and plots the dose response curve for bistable models\n## Author: Sahil Moza\n## June 26, 2014\n\nimport moose\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef setupSteadyState(simdt,plotDt):\n\n ksolve = moose.Ksolve( '/model/kinetics/ksolve' )\n stoich = moose.Stoich( '/model/kinetics/stoich' )\n stoich.compartment = moose.element('/model/kinetics')\n\n stoich.ksolve = ksolve\n #ksolve.stoich = stoich\n stoich.path = \"/model/kinetics/##\"\n state = moose.SteadyState( '/model/kinetics/state' )\n \n #### Set clocks here\n #moose.useClock(4, \"/model/kinetics/##[]\", \"process\")\n #moose.setClock(4, float(simdt))\n #moose.setClock(5, float(simdt))\n #moose.useClock(5, '/model/kinetics/ksolve', 'process' )\n #moose.useClock(8, '/model/graphs/#', 'process' )\n #moose.setClock(8, float(plotDt))\n \n moose.reinit()\n\n state.stoich = stoich\n state.showMatrices()\n state.convergenceCriterion = 1e-8\n \n return ksolve, state\n\ndef parseModelName(fileName):\n pos1=fileName.rfind('/')\n pos2=fileName.rfind('.')\n directory=fileName[:pos1]\n prefix=fileName[pos1+1:pos2]\n suffix=fileName[pos2+1:len(fileName)]\n return directory, prefix, suffix\n\n# Solve for the steady state\ndef getState( ksolve, state, vol):\n scale 
= 1.0 / ( vol * 6.022e23 )\n    moose.reinit()\n    state.randomInit() # Removing random initial condition to systematically make Dose response curves.\n    moose.start( 2.0 ) # Run the model for 2 seconds.\n    state.settle()\n    \n    vector = []\n    a = moose.element( '/model/kinetics/a' ).conc\n    #print a\n    for x in ksolve.nVec[0]:\n        vector.append( x * scale)\n    moose.start( 10.0 ) # Run model for 10 seconds, just for display\n    failedSteadyState = any([np.isnan(x) for x in vector])\n    \n    if not (failedSteadyState):\n        return state.stateType, state.solutionStatus, a, vector\n    # NaN steady states are flagged with a non-zero status so the caller's\n    # `solStatus == 0` check skips them (the original fell through to None,\n    # which crashed the caller's tuple unpacking).\n    return state.stateType, -1, a, vector\n\n\ndef main():\n    # Setup parameters for simulation and plotting\n    simdt= 1e-2\n    plotDt= 1\n\n    # Factors to change in the dose concentration in log scale\n    factorExponent = 10  ## Base: ten raised to some power.\n    factorBegin = -20\n    factorEnd = 21\n    factorStepsize = 1\n    factorScale = 10.0 ## To scale up or down the factors\n\n    # Load Model and set up the steady state solver.\n    # model = sys.argv[1] # To load model from a file.\n    model = './19085.cspace'\n    modelPath, modelName, modelType = parseModelName(model)\n    outputDir = modelPath\n    \n    modelId = moose.loadModel(model, 'model', 'ee')\n    dosePath = '/model/kinetics/b/DabX' # The dose entity\n\n    ksolve, state = setupSteadyState( simdt, plotDt)\n    vol = moose.element( '/model/kinetics' ).volume\n    iterInit = 100\n    solutionVector = []\n    factorArr = []\n    \n    enz = moose.element(dosePath)\n    init = float(enz.kcat) # Dose parameter\n    \n    # Sweep the dose parameter (kcat) over the factor range.\n    for factor in range(factorBegin, factorEnd, factorStepsize ):\n        scale = factorExponent ** (factor/factorScale) \n        enz.kcat = init * scale \n        print( \"scale={:.3f}\\tkcat={:.3f}\".format( scale, enz.kcat) )\n        for num in range(iterInit):\n            stateType, solStatus, a, vector = getState( ksolve, state, vol)\n            if solStatus == 0:\n                #solutionVector.append(vector[0]/sum(vector))\n                solutionVector.append(a)\n                factorArr.append(scale)   \n    \n    joint = np.array([factorArr, solutionVector])\n    joint = joint[:,joint[1,:].argsort()]\n    \n    # Plot dose response. Remove NaN from the values else plotting will fail.\n    ax = plt.subplot()\n    # plt.semilogx was failing. not sure why. That is why this convoluted\n    # approach.\n    ax.plot( joint[0,:], joint[1,:] , marker=\"o\", label = 'concA')\n    ax.set_xscale( 'log' )\n    plt.xlabel('Dose')\n    plt.ylabel('Response')\n    plt.suptitle('Dose-Response Curve for a bistable system')\n    \n    plt.legend(loc=3)\n    #plt.savefig(outputDir + \"/\" + modelName +\"_doseResponse\" + \".png\")\n    plt.show()\n    quit()\n    \n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"BhallaLab/moose","sub_path":"moose-examples/tutorials/ChemicalBistables/doseResponse.py","file_name":"doseResponse.py","file_ext":"py","file_size_in_byte":4083,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"}
+{"seq_id":"29844555267","text":"# Given an undirected graph, return true if and only if it is bipartite.\n#\n# Recall that a graph is bipartite if we can split its set of nodes into two independent subsets A and B\n# such that every edge in the graph has one node in A and another node in B.\n#\n# The graph is given in the following form: graph[i] is a list of indexes j for which the edge between nodes\n# i and j exists. Each node is an integer between 0 and graph.length - 1. 
There are no self edges or parallel edges:\n# graph[i] does not contain i, and it doesn't contain any element twice.\n#\n# Example 1:\n# Input: [[1, 3], [0, 2], [1, 3], [0, 2]]\n# Output: true\n# Explanation:\n# The graph looks like this:\n# 0----1\n# |    |\n# |    |\n# 3----2\n# We can divide the vertices into two groups: {0, 2} and {1, 3}.\n# Example 2:\n# Input: [[1, 2, 3], [0, 2], [0, 1, 3], [0, 2]]\n# Output: false\n# Explanation:\n# The graph looks like this:\n# 0----1\n# | \\ |\n# | \\ |\n# 3----2\n# We cannot find a way to divide the set of nodes into two independent subsets.\n#\n# Note:\n#\n# graph will have length in range[1, 100].\n# graph[i] will contain integers in range[0, graph.length - 1].\n# graph[i] will not contain i or duplicate values.\n# The graph is undirected:\n# if any element j is in graph[i], then i will be in graph[j].\n\nfrom collections import deque\n\nclass Solution:\n    def isBipartite(self, graph):\n        \"\"\"\n        :type graph: List[List[int]]\n        :rtype: bool\n        \"\"\"\n        A = set()\n        B = set()\n        A.add(0)\n        queue = deque()\n        queue.append(0)\n        visited = set()\n        while queue or len(A | B) < len(graph):\n            n1 = queue.pop() if queue else list(filter(lambda n: n not in A | B, range(len(graph))))[0]\n            if len(graph[n1]) == 0:\n                A.add(n1)\n            this_set = A if n1 in A else B\n            other_set = B if n1 in A else A\n            for n2 in graph[n1]:\n                if n2 in this_set:\n                    return False\n                other_set.add(n2)\n                if n2 not in visited:\n                    queue.append(n2)\n            visited.add(n1)\n        return True\n\n\nif __name__ == '__main__':\n\n    result = Solution().isBipartite(\n        [[2,4],[2,3,4],[0,1],[1],[0,1],[7],[9],[5],[],[6],[12,14],[],[10],[],[10],[19],[18],[],[16],[15],[20],[23],[],\n         [20,21],[],[],[27],[26],[],[],[34],[33,34],[],[31],[30,31],[38,39],[37,38,39],[36],[35,36],[35,36],[43],[],[],\n         [40],[],[49],[47,48,49],[46,48,49],[46,47,49],[45,46,47,48]])\n    assert not result\n    result = Solution().isBipartite(\n        [[], [2, 4, 6], [1, 4, 8, 9], [7, 8], [1, 2, 8, 9], [6, 9], [1, 5, 7, 8, 9], [3, 6, 9], [2, 3, 4, 6, 9],\n         [2, 4, 5, 6, 7, 8]])\n    assert not result\n    result = Solution().isBipartite([[1,3], [0,2], [1,3], [0,2]])\n    assert result\n    result = Solution().isBipartite([[1,2,3], [0,2], [0,1,3], [0,2]])\n    assert not result\n","repo_name":"mzivi/elgoog","sub_path":"leetcode/785_is_graph_bipartite.py","file_name":"785_is_graph_bipartite.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"28898861630","text":"# This file is used for simplifying running all the search algorithms for the analysis\n\nfrom xpuzzle import XPuzzle, PrioritizedPuzzle, Move\nfrom timeout import timeout, TimeoutError\nfrom heuristics import calcH0, calcH1, calcH2\n# import heapq\n# import numpy as np\nfrom typing import List, Tuple, Dict, Callable, Set, Type, Optional\nimport time\nimport sys\nimport os\n\n\nimport importlib\n# Importing UCS\nuniformcost = importlib.import_module('uniform-cost')\n\n# Importing GBF\nimport greedy_best_first\n\n# Importing a-star\na_star = importlib.import_module('a-star')\n\n\ndef timer_ended(ind: int, filename: str, h: str, input_name: str):\n    with open(\"results/{}/{}_{}{}_solution.txt\".format(input_name, ind, filename, h), \"w\") as f_solution:\n        f_solution.write(\"No solution found in 60 seconds\")\n    \n    with open(\"results/{}/{}_{}{}_search.txt\".format(input_name, ind, filename, h), \"w\") as f_search:\n        f_search.write(\"No solution found in 60 seconds\")\n\n\nif __name__ == \"__main__\":\n    \n    def get_tile_to_move(move: 
Type[Move], puzzle: XPuzzle):\n \"\"\"\n Helper function to get the (non-zero) tile moved during a move\n \"\"\"\n num1 = puzzle.state[move.idx1[0]][move.idx1[1]]\n num2 = puzzle.state[move.idx2[0]][move.idx2[1]]\n return num1 if num1 != 0 else num2\n\n # Command line argument parsing\n import argparse\n\n parser = argparse.ArgumentParser(description=\"A Star algorithm using 2(+1) different heuristics.\")\n parser.add_argument('-f', '--filename', dest='filename', default=r\"samplePuzzles.txt\", type=str)\n parser.add_argument('-s', '--shape', dest='shape', default=(2, 4), nargs=2, type=int)\n parser.add_argument('-astar', '--astar', dest='astar', action='store_true')\n parser.add_argument('-gbf', '--greedy', dest='gbf', action='store_true')\n parser.add_argument('-ucs', '--uniform', dest='ucs', action='store_true')\n\n args = parser.parse_args()\n\n shape: Tuple[int, int] = tuple(args.shape) #type: ignore\n puzzles = XPuzzle.from_file(args.filename, shape)\n\n output_dir = \"results/{}\".format(args.filename[:-4])\n\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n astar: bool = args.astar\n gbf: bool = args.gbf\n ucs: bool = args.ucs\n\n if not gbf + ucs + astar == 1:\n sys.exit(\"Invalid search type\")\n\n filename = 'astar'\n search_func = a_star.a_star\n\n if gbf and not ucs:\n filename = 'gbf'\n search_func = greedy_best_first.greedy_best_first\n elif not gbf and ucs:\n filename = 'ucs'\n search_func = uniformcost.uniform_cost\n\n\n\n # Iterate through all the puzzles, applying a-star with heuristic #1 and #2 + output results to files\n for ind, puzzle in enumerate(puzzles):\n print(f'Performing puzzle {ind}')\n\n if ucs:\n # h1\n try:\n # applying uniform cost search\n start_time = time.time()\n path_taken_h1, search_path_h1, h_score_h1, g_score_h1, f_score_h1 = search_func(puzzle)\n elapsed_time = time.time() - start_time\n\n # solution path file\n with open(\"results/{}/{}_{}-h1_solution.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_solution:\n total_cost = 0\n for move, new_state in path_taken_h1[::-1]: # we iterate from end to beginning because the order is reversed\n if move is None:\n f_solution.write(\"{} {} {}\\n\".format(0, 0, str(new_state)))\n else:\n f_solution.write(\"{} {} {}\\n\".format(get_tile_to_move(move, new_state), move.cost, str(new_state)))\n total_cost += move.cost\n \n f_solution.write(\"\\n{} {}\".format(total_cost, elapsed_time))\n \n # search path file\n with open(\"results/{}/{}_{}-h1_search.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_search:\n for node in search_path_h1:\n f_search.write(\"{} {} {} {}\\n\".format(node[0], node[1], node[2], str(node[3])))\n \n except TimeoutError as e:\n print(e)\n timer_ended(ind, filename, '', args.filename[:-4])\n \n\n\n else:\n # is a-star or greedy best first\n \n # h0\n try:\n # Running algo with H0\n start_time = time.time()\n path_taken_h0, search_path_h0, h_score_h0, g_score_h0, f_score_h0 = search_func(puzzle, calcH0)\n elapsed_time = time.time() - start_time\n\n # solution path file\n with open(\"results/{}/{}_{}-h0_solution.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_solution_h0:\n total_cost = 0\n for move, new_state in path_taken_h0[::-1]: # we iterate from end to beginning because the order is reversed\n if move is None:\n f_solution_h0.write(\"{} {} {}\\n\".format(0, 0, str(new_state)))\n else:\n f_solution_h0.write(\"{} {} {}\\n\".format(get_tile_to_move(move, new_state), move.cost, str(new_state)))\n total_cost += move.cost\n \n 
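# summary line: total solution cost and elapsed wall-clock time\n                    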
f_solution_h0.write(\"\\n{} {}\".format(total_cost, elapsed_time))\n \n # search path file\n with open(\"results/{}/{}_{}-h0_search.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_search_h0:\n for node in search_path_h0:\n f_search_h0.write(\"{} {} {} {}\\n\".format(node[0], node[1], node[2], str(node[3])))\n \n except TimeoutError as e:\n print(e)\n timer_ended(ind, filename, '-h0', args.filename[:-4])\n\n # h1\n try:\n # applying a-star\n start_time = time.time()\n path_taken_h1, search_path_h1, h_score_h1, g_score_h1, f_score_h1 = search_func(puzzle, calcH1)\n elapsed_time = time.time() - start_time\n\n # solution path file\n with open(\"results/{}/{}_{}-h1_solution.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_solution_h1:\n total_cost = 0\n for move, new_state in path_taken_h1[::-1]: # we iterate from end to beginning because the order is reversed\n if move is None:\n f_solution_h1.write(\"{} {} {}\\n\".format(0, 0, str(new_state)))\n else:\n f_solution_h1.write(\"{} {} {}\\n\".format(get_tile_to_move(move, new_state), move.cost, str(new_state)))\n total_cost += move.cost\n \n f_solution_h1.write(\"\\n{} {}\".format(total_cost, elapsed_time))\n \n # search path file\n with open(\"results/{}/{}_{}-h1_search.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_search_h1:\n for node in search_path_h1:\n f_search_h1.write(\"{} {} {} {}\\n\".format(node[0], node[1], node[2], str(node[3])))\n \n except TimeoutError as e:\n print(e)\n timer_ended(ind, filename, '-h1', args.filename[:-4])\n\n \n # h2\n try:\n # applying a-star\n start_time = time.time()\n path_taken_h2, search_path_h2, h_score_h2, g_score_h2, f_score_h2 = search_func(puzzle, calcH2)\n elapsed_time = time.time() - start_time\n\n # solution path file\n with open(\"results/{}/{}_{}-h2_solution.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_solution_h2:\n total_cost = 0\n for move, new_state in path_taken_h2[::-1]: # we iterate from end to beginning because the order is reversed\n if move is None:\n f_solution_h2.write(\"{} {} {}\\n\".format(0, 0, str(new_state)))\n else:\n f_solution_h2.write(\"{} {} {}\\n\".format(get_tile_to_move(move, new_state), move.cost, str(new_state)))\n total_cost += move.cost\n \n f_solution_h2.write(\"\\n{} {}\".format(total_cost, elapsed_time))\n \n # search path file\n with open(\"results/{}/{}_{}-h2_search.txt\".format(args.filename[:-4], ind, filename), \"w\") as f_search_h2:\n for node in search_path_h2:\n f_search_h2.write(\"{} {} {} {}\\n\".format(node[0], node[1], node[2], str(node[3])))\n \n except TimeoutError as e:\n print(e)\n timer_ended(ind, filename, '-h2', args.filename[:-4])\n\n","repo_name":"WillTarte/COMP472_A2","sub_path":"unified.py","file_name":"unified.py","file_ext":"py","file_size_in_byte":9021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38434578000","text":"#coding=utf-8\n#Version:python3.5.2\n#Tools:Pycharm 2017.1\nfrom urllib import request\nimport gevent\nimport time\nfrom gevent import monkey\nmonkey.patch_all()\n\ndef f(url):\n print('Get %s' %url)\n req=request.urlopen(url)\n data=req.read()\n print(data)\n\nurls=[\n 'https://www.baidu.com/',\n 'https://www.python.org/',\n 'https://www.taobao.com/'\n]\ntime1=time.time()\nfor i in urls:\n f(i)\nprint('同步',time.time()-time1)\ntime2=time.time()\ngevent.joinall(\n [\n gevent.spawn(f,'https://www.baidu.com/'),\n gevent.spawn(f,'https://www.python.org/'),\n gevent.spawn(f,'https://www.taobao.com/')\n 
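# each spawn() schedules f(url) on its own greenlet; joinall() waits for\n    # all three, which run concurrently thanks to monkey.patch_all()\n    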
]\n)\nprint('异步',time.time()-time2)","repo_name":"13661892653/workspace","sub_path":"pyCode/oldboy_Oper_14/week10/自动协程爬虫场景.py","file_name":"自动协程爬虫场景.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25853883856","text":"# # def is_even(a):\n# # return a%2==0\n\n# # print(is_even(5))\n\n\n# # eve = lambda a : a%2==0\n\n# # print(eve(4))\n\n# def last_char(s):\n# return s[-1]\n\n# l = lambda m : m[-1]\n# print(l('mihir'))\n\n# lambda with if else\ndef func(s):\n if len(s)>5:\n return True\n return False\n\nfun = lambda s : len(s)>5 #True if len(s)>5 else False\nprint(fun('mihis'))","repo_name":"mihirverma7781/Python-Scripts","sub_path":"chap12/lambdaexpp.py","file_name":"lambdaexpp.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8263395430","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pytablereader as ptr\nimport pytablewriter as ptw\nimport six\nimport codecs\nimport collections\nfrom collections import defaultdict\n\n#local import\nfrom codes import flag_codes\n\n\ndef write_to_buffer(writer):\n # set output stream to text, so it can be printed og logged\n writer.stream = six.StringIO()\n writer.write_table()\n print(writer.stream.getvalue())\n\n\ndef write_to_markdown(writer):\n # change the output stream to a file\n with codecs.open(\"sample.md\", \"w\", \"utf-8-sig\") as f:\n writer.stream = f\n writer.write_table()\n\n\ndef return_unique(hashmap):\n for key, value in hashmap.items():\n if isinstance(value, list):\n if not (isinstance(value[0], list) or isinstance(value[0], set)):\n hashmap[key] = set(value)\n else: # we need to go deeper\n tmp = []\n for v in value:\n tmp.append(set(v))\n hashmap[key] = tmp\n elif isinstance(value, dict):\n return_unique(value)\n return hashmap\n\n\ndef build_table_stats_annual(data, count_only=False, unique_only=False):\n tmp = []\n table_name = \"Annual Stats\"\n\n if unique_only:\n data = return_unique(data)\n table_name = table_name + \" (Unique only)\"\n\n writer = ptw.MarkdownTableWriter()\n writer.table_name = table_name\n writer.header_list = [\"year\", \"countries\", \"cities\", \"country_city\"]\n\n ordered_data = collections.OrderedDict(sorted(data.items()))\n\n for key, value in ordered_data.iteritems():\n if not count_only:\n tmp.append([key, \", \".join(list(value[0])), \", \".join(list(value[1])), \", \".join(list(value[2]))])\n else:\n tmp.append([key, len(list(value[0])), len(list(value[1])), len(list(value[2]))])\n\n writer.value_matrix = tmp\n write_to_buffer(writer)\n\n\ndef build_table_stats_total(data, count_only=False, unique_only=False):\n tmp = []\n table_name = \"Total Stats\"\n\n if unique_only:\n data = return_unique(data)\n table_name = table_name + \" (Unique only)\"\n\n writer = ptw.MarkdownTableWriter()\n writer.table_name = table_name\n writer.header_list = [\"name\", \"value\"]\n\n for key, value in data.iteritems():\n if not count_only:\n tmp.append([key, \"; \".join(value)])\n else:\n tmp.append([key, len(list(value))])\n\n writer.value_matrix = tmp\n write_to_buffer(writer)\n\n\ndef get_country_flag_emoji(country_name):\n if country_name in flag_codes.country_codes:\n country_code = flag_codes.country_codes[country_name]\n else:\n country_code = 'unknown'\n\n # handle on-line talks\n if country_name == 'WWW':\n return flag_codes.letter_codes['www']\n\n # handle countries not in the list\n 
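# (each code letter maps to a regional-indicator symbol in the loop below;\n    # e.g. 'NO' composes the pair most platforms render as the Norwegian flag)\n    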
if country_code == 'unknown':\n        return flag_codes.letter_codes['unknown']\n\n    flag_emoji = u''\n    for c in country_code:\n        flag_emoji += flag_codes.letter_codes[c]\n    return flag_emoji\n\n\ndef get_total_stats(data, show_flags=True, unique_only=True):\n    number_of_events = len(data['total_country'])\n\n    if unique_only:\n        data = return_unique(data)\n\n    total_country_flags = []\n    if show_flags:\n        for country in sorted(data['total_country']):\n            total_country_flags.append(get_country_flag_emoji(country))\n\n    print(\"Involved in \" + str(number_of_events) + \" events in \" + str(len(data['total_country'])) + \" countries.\")\n    print(\" \".join(total_country_flags))\n\n\nif __name__ == \"__main__\":\n    latest_year = 2019\n    stats_pr_year = defaultdict(list)\n    stats_total = defaultdict(list)\n    current_year = latest_year\n\n    loader = ptr.TableUrlLoader(\n        \"http://localhost:4000/talks/\",\n        \"html\")\n\n    writer = ptw.TableWriterFactory.create_from_format_name(\"md\")\n\n    for table_data in loader.load():\n        country_city_pr_year = []\n        country_pr_year = []\n        city_pr_year = []\n\n        for record in table_data.row_list:\n            country_city_pr_year.append(record[1])\n            parsed = record[1].split(\",\")\n            country_pr_year.append(parsed[0])\n            city_pr_year.append(parsed[1])\n\n        writer.from_tabledata(table_data)\n\n        stats_pr_year[current_year] = [country_pr_year, city_pr_year, country_city_pr_year]\n\n        stats_total[\"total_country_city\"] += country_city_pr_year\n        stats_total[\"total_country\"] += country_pr_year\n        stats_total[\"total_city\"] += city_pr_year\n\n        current_year = current_year - 1 # assumes there is data for every year, sorted by year (descending)\n\n    #build_table_stats_annual(stats_pr_year, unique_only=True)\n    #build_table_stats_annual(stats_pr_year, count_only=True, unique_only=True)\n\n    #build_table_stats_total(stats_total, unique_only=True)\n    #build_table_stats_total(stats_total, count_only=True, unique_only=True)\n    get_total_stats(stats_total)\n","repo_name":"mehmandarov/mehmandarov.github.io","sub_path":"scripts/stats/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71439942481","text":"#Python Exercise 051: Write a program that reads the first term and the common difference of an arithmetic progression (AP). At the end, show the first 10 terms of that progression.
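\n# (AP refresher: the n-th term is termo + (n-1)*razao, so the exclusive\n# bound 'dez' computed below is termo + 10*razao, one past the 10th term.)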
\nprint('''\n=============================\n   10 termos de uma PA\n=============================\n''')\ntermo = int(input('primeiro termo: '))\nrazao = int(input('razao: ')) \ndez = termo + (11 - 1) * razao\nfor c in range(termo, dez, razao):\n    print('{} '.format(c), end='→ ')\nprint('acabou')","repo_name":"gabrielwallaceBDS/exercicios-python-3","sub_path":"exercicios/ex051.py","file_name":"ex051.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71148692243","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar  8 17:53:18 2021\n\n@author: iagaxtma\n\"\"\"\n\n#%%\n###############################################################################\n#Modules\n###############################################################################\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport sys\nimport re\nimport os\nimport glob\nimport pyvista as pv\nimport pandas as pd\n\n\n#%%\n###############################################################################\n#Plotting stuff SECTION\n###############################################################################\n\ndef latexify():\n    scale=1\n    page_width_pt=485*scale #pt\n    inches_per_pt=1/72.27\n    golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this)\n    fig_width=page_width_pt*inches_per_pt\n    fig_height=fig_width*golden_mean \n    fig_size=[fig_width,fig_height]\n\n    params = {'backend': 'ps',\n              #'text.latex.preamble': [r'\\usepackage{serif}'],\n              'axes.labelsize': 10*scale, # fontsize for x and y labels (was 10)\n              'axes.titlesize': 10*scale,\n              #'text.fontsize': 10*scale, # was 10\n              'legend.fontsize': 10*scale, # was 10\n              'xtick.labelsize': 10*scale,\n              'ytick.labelsize': 10*scale,\n              'text.usetex': True,\n              'figure.figsize': fig_size,\n              'axes.linewidth':0.5,\n              'xtick.direction': 'out',\n              'ytick.direction': 'out',\n              'font.family': 'serif',\n              'grid.color' : 'lightgray', # grid color, 'lightgrey','lightgray','silver'\n              'grid.linestyle' : '-',\n              'grid.linewidth' : 0.5, # in points\n              'grid.alpha' : 1.0, # transparency, between 0.0 and 1.0\n              \"xtick.minor.visible\" : False\n              }\n    plt.rcParams.update(params)\n    \ndef savefig(filename):\n    #plt.savefig('{}.pgf'.format(filename),dpi=1000,bbox_inches='tight')\n    plt.savefig('{}.pdf'.format(filename),dpi=600,bbox_inches='tight')\n    #tikz_save('{}.tikz'.format(filename),\n    #figureheight = '\\\\figureheight',\n    #figurewidth = '\\\\figurewidth')\n    \ndef savefigPNG(filename):\n    plt.savefig('{}.png'.format(filename),dpi=300,bbox_inches='tight')\n    \n\nnew_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728','#9467bd', '#8c564b', '#e377c2', '#7f7f7f','#bcbd22', '#17becf']\n\n#%%\n###############################################################################\n#Definition Functions\n###############################################################################\n\ndef list_files(dir):\n    r = []\n    for root, dirs, files in os.walk(dir):\n        for name in files:\n            if name.endswith(\"vtp\"):\n                r.append(os.path.join(root, name))\n    r.sort(key=os.path.getctime)\n    return r\n\n\ndef loadData(filename,fieldScalar,ofDisc=False) :\n    print('\\t \\t Loading and Interpolating file:' + str(filename) + '\\t field: ' + str(fieldScalar))\n    data = pv.read(filename)\n    mesh=data.points\n    x,y,z=mesh[:,0],mesh[:,1],mesh[:,2]\n\n    
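# pull the named scalar field (here the pressure array 'p') stored on the\n    # surface mesh points\n    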
p=data.point_arrays.get_array(fieldScalar)\n\n    nx,ny,nz=d*2+1,d*2+1,h+1\n    \n    if (ofDisc==False): # (only this branch defines the sample grid; ofDisc=True is not implemented)\n        xs1D = np.linspace(x.min(),x.max(),nx,endpoint=True)\n        ys1D = np.linspace(y.min(),y.max(),ny,endpoint=True)\n        zs1D = np.linspace(z.min(),z.max(),nz,endpoint=True)\n        print('\\t \\t using user discretization nx,ny,nz = ' + str(nx) + ',' + str(ny) + ',' + str(nz))\n    \n    x0=np.array([x.min()])[0]\n    x1=np.array([x.max()])[0]\n    y0=np.array([y.min()])[0]\n    y1=np.array([y.max()])[0]\n    z0=np.array([z.min()])[0]\n    z1=np.array([z.max()])[0]\n    \n    \n    print('\\t \\t interpolating and creating linear cube map')\n    [xs,ys,zs] = np.meshgrid(np.array(nx*[x0]),ys1D,zs1D)\n    piFront=interpolate.griddata((x,y,z),p,(xs,ys,zs),method='nearest')[:,0,:]\n    [xs,ys,zs] = np.meshgrid(np.array(nx*[x1]),ys1D,zs1D)\n    piBack=-1*interpolate.griddata((x,y,z),p,(xs,ys,zs),method='nearest')[:,0,:]\n    [xs,ys,zs] = np.meshgrid(xs1D,np.array(ny*[y0]),zs1D)\n    piSide1=-1*interpolate.griddata((x,y,z),p,(xs,ys,zs),method='nearest')[0,:,:]\n    # piSide1=np.flip(piSide1,axis=0)\n    [xs,ys,zs] = np.meshgrid(xs1D,np.array(ny*[y1]),zs1D)\n    piSide2=interpolate.griddata((x,y,z),p,(xs,ys,zs),method='nearest')[0,:,:]\n    \n    pFB=np.concatenate((piFront,piBack))\n    pSS=np.concatenate((piSide1,piSide2))\n    pAll=np.concatenate((piSide1,piFront,piSide2,piBack))\n    \n    \n    xFB=np.linspace(0.5*y0,y1+0.5*y1,pFB.shape[0])\n    xSS=np.linspace(y0,y1,pSS.shape[0])\n    xAll=np.linspace(y0-0.5*y1,2*y1+0.5*y1,pAll.shape[0])\n    zAll=np.linspace(z0,z1,pAll.shape[1])\n    \n    pAll=np.transpose(pAll)\n    pFB=np.transpose(pFB)\n    pSS=np.transpose(pSS)\n    \n    print('\\t ... done')\n    return xFB,xSS,xAll,zAll,pFB,pSS,pAll\n\ndef pressureMaxOverHeight(x,y):\n    zmaxH=x\n    pmaxH=np.max(y,axis=1)\n    return zmaxH,pmaxH\n\n\ndef FFT(write):\n    # relies on module-level globals: fftAmp, Kfft, OD\n    ###################################################\n    # Temporal FFT for the real part and the amplitude of the spatial FFT\n    # For the wave number from 0 to Kfft\n    ###################################################\n\n    print (\"starting FFT for wave No from 0 to \"+str(Kfft))\n    \"\"\" By far the most efficient method \"\"\"\n    # (a `start = time()` timing call was removed here: `time` is not imported\n    # as a function and is shadowed by the global time axis defined below)\n    \n    import multiprocessing\n    nthread = multiprocessing.cpu_count()\n\n    A=np.zeros((fftAmp.shape[0],fftAmp.shape[1],fftAmp.shape[2],Kfft+1),dtype='complex') \n    AmpTfftAmp=np.zeros((int(fftAmp.shape[0]/2),fftAmp.shape[1],fftAmp.shape[2],Kfft+1),dtype='complex')\n    AmpTfftReal=np.zeros((int(fftAmp.shape[0]/2),fftAmp.shape[1],fftAmp.shape[2],Kfft+1),dtype='complex')\n    \n    # NOTE: RealTfftAmp/RealTfftReal are allocated but never filled below, so\n    # they are saved as zeros (kept as in the original script)\n    RealTfftAmp=np.zeros((int(fftAmp.shape[0]/2),fftAmp.shape[1],fftAmp.shape[2],Kfft+1),dtype='complex')\n    RealTfftReal=np.zeros((int(fftAmp.shape[0]/2),fftAmp.shape[1],fftAmp.shape[2],Kfft+1),dtype='complex')\n\n    import pyfftw\n\n    A = pyfftw.interfaces.numpy_fft.fft(fftAmp[:,:,:,0:Kfft+1], axis=0, threads=nthread)\n    AmpTfftAmp = np.abs(A[0:int(fftAmp.shape[0]/2),:,:,:])\n    AmpTfftReal = np.real(A[0:int(fftAmp.shape[0]/2),:,:,:])\n    # (a stray `Sa = np.zeros(N)` left over from calculateSpectra was removed;\n    # N is not defined in this scope)\n\n    # For explanation see https://www.cbcity.de/die-fft-mit-python-einfach-erklaert\n    # Determine frequencies resulting from FFT\n    # https://github.com/pyNFFT/pyNFFT for non uniform samp\n    \n    ###################################################\n    #Saving the results of the FFT to OD\n    ################################################### \n\n    if write:\n        np.save(OD+'AmpTfftAmp',AmpTfftAmp)\n        np.save(OD+'AmpTfftReal',AmpTfftReal)\n        np.save(OD+'RealTfftAmp',RealTfftAmp)\n        np.save(OD+'RealTfftReal',RealTfftReal)\n    \n    print('data format: Tfft[freq,x,y,waveNo]')\n    return AmpTfftAmp, AmpTfftReal, RealTfftAmp, RealTfftReal\n\n    
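\n# NOTE: chunks() below appears unused in this script; it lazily yields\n# successive n-sized slices, e.g. list(chunks([1, 2, 3, 4, 5], 2))\n# gives [[1, 2], [3, 4], [5]].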
\ndef chunks(l, n):\n    n = max(1, n)\n    return (l[i:i+n] for i in range(0, len(l), n))\n\ndef createFloorData(heightFloor,z,data):\n    nFloors=round(z.max()/heightFloor) #in [m]\n    chunkFloors=np.array_split(z, nFloors)\n    chunkFloorIndex=[]\n    for i in range(0,len(chunkFloors)):\n        index=[]\n        for j in range(0,len(chunkFloors[i])):\n            index.append(np.argwhere(z==chunkFloors[i][j]))\n        chunkFloorIndex.append(index)\n    \n    chunkFloors_z0=[chunkFloors[i][0] for i in range(0,len(chunkFloors))]\n    chunkFloors_z1=[chunkFloors[i][-1] for i in range(0,len(chunkFloors))]\n    chunkFloors_index0=[chunkFloorIndex[i][0] for i in range(0,len(chunkFloorIndex))]\n    chunkFloors_index1=[chunkFloorIndex[i][-1] for i in range(0,len(chunkFloorIndex))]\n    \n    dataFloors=[]\n    dataFloorsMean=[]\n    for i in range(0,len(data)): # (was len(pFB): iterate the argument, not the global)\n        temp=[]\n        temp2=[]\n        for j in range(0,int(nFloors)):\n            temp.append(data[i][int(chunkFloors_index0[j]):int(chunkFloors_index1[j]),:])\n            temp2.append(np.mean(data[i][int(chunkFloors_index0[j]):int(chunkFloors_index1[j]),:]))\n        dataFloors.append(temp)\n        dataFloorsMean.append(temp2)\n    \n    dataFloorsMean=np.transpose(np.array(dataFloorsMean))\n    dataFloorsMean=dataFloorsMean*np.diff(z)[0]*np.diff(xFB)[0]*rho_inf # NOTE: xFB and rho_inf are module-level globals\n    zFloors=np.arange(0,nFloors,1)\n    return zFloors, dataFloorsMean\n\n\n\ndef calculateSpectra(y, dT):\n    # Get shape of truncated forces\n    sp = y.shape\n    nT = sp[0]\n\n    # N is half the length of the output of the FFT (using symmetry)\n    N = nT//2 + 1 # // -> int\n\n    # Calculate the Nyquist frequency\n    fNyq = 1 / (2 * dT) # Nyquist frequency\n\n    # Empty power spectral density\n    # Due to symmetry, only the first half of the FFT is of interest\n    Sa = np.zeros(N)\n\n    # For explanation see https://www.cbcity.de/die-fft-mit-python-einfach-erklaert\n    # Determine frequencies resulting from FFT\n    # https://github.com/pyNFFT/pyNFFT for non uniform samples\n    # Time domain\n    # -------------\n    f = abs(np.fft.fftfreq(nT, dT)[:N]) # Frequency\n    # f = np.linspace(0, fNyq, N, endpoint=True) # Same as above\n\n    # Calculate the force spectrum\n    Sa = abs(np.fft.fft(y)[:N])\n\n    # Get the Power Spectral Density\n    Sa = Sa**2\n\n    # Scaling Factor\n    Sa = Sa / nT\n\n    # Scaling Factor\n    Sa = Sa * dT\n\n    # Normalize by standard deviation and frequency\n    Sa = Sa * f / (np.std(y) ** 2)\n\n    return f, Sa\n\n\n\n\n#%%\n\n###############################################################################\n#INPUT SECTION\n###############################################################################\n\n#Control\ninterpolateData=False\nloadinterpolatedData=True\ncreateInstantAnimation=False\n#Data Input\ncaseName='1_conv_ref1' \nrootPath='/media/dani/linuxHDD/openfoam/simpleFoam/testing/postProcessing/raw/' + caseName\nfilename='building_wall.vtp' #name of data file after interpolating data\nh=160 #height\nd=32 #length\n\n#Global time data\nt0,t1,dt=240,250,1\ntime=np.arange(t0,t1+dt,dt)\ntimeZeroed=time-t0 #Zero time\n\n#Ambient stuff\nrho_inf=1.18 #rho_inf\np_inf=101325 #pressure inf\n\nheightFloor=4 #height floor [m]\n\n\n#%%\n###############################################################################\n#Preprocessing data\n###############################################################################\n\nfileList=list_files(rootPath)\nfields=[]\n\nif (interpolateData==True):\n#Create FileList Dictionary\n    for i in range(0,t1-t0):\n        print('Preprocessing file: ' + str(i) + '/' + str(t1-t0) + '\\t time: ' + str(time[i]))\n        xFB,xSS,xAll,zAll,pFB,pSS,pAll=loadData(fileList[i],'p',ofDisc=False)\n        
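# each row stores [t, xFB, xSS, xAll, zAll, pFB, pSS, pAll] -- the same\n        # order the DataFrame columns are unpacked in further below\n        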
fields.append([timeZeroed[i],xFB,xSS,xAll,zAll,pFB,pSS,pAll])\n del xFB,xSS,xAll,zAll,pFB,pSS,pAll\n \n print('Converting and saving all field data as pickle file:') \n import pandas as pd\n df = pd.DataFrame(fields)\n df.to_pickle(rootPath + '_pickle')\n \nif (loadinterpolatedData==True): \n df=pd.read_pickle(rootPath + '_pickle')\n\nxFB= df[1][0]\nxSS= df[2][0]\nxAll= df[3][0]\nz= df[4][0]\npFB= df[5]*rho_inf\npSS= df[6]*rho_inf\npALL= df[7]\n\n#%%Plotting contour data and saving\nanimationPath= rootPath + '_instantfield'\nif not os.path.exists(animationPath):\n os.makedirs(animationPath)\n\nif (createInstantAnimation==True): \n levels_p=np.linspace(-700,1000,50)\n for i in range(0,len(pALL)):\n print('Creating Animation of instant field ' + str(i+1) + '/' + str(t1-t0))\n latexify()\n fig1, (ax1) = plt.subplots(1)\n axlist=[ax1]\n surf=ax1.contourf(xAll,z,pALL[i]-p_inf,levels=levels_p,extend='both',cmap='coolwarm')\n ax1.set_xlabel(r'x\\,[m]')\n ax1.set_ylabel(r'z\\,[m]')\n ax1.set_ylim(0,z.max())\n plt.colorbar(surf,ax=axlist,format='%.0f',label=r'$p_{rel}\\,[Pa]$',orientation='horizontal',pad=0.2)\n savefigPNG(animationPath + '/_instantField' + str(i))\n\n\n#%%\n###############################################################################\n#Creating floor data\n###############################################################################\nnFloors=round(z.max()/heightFloor) #in [m]\nchunkFloors=np.array_split(z, nFloors)\n\nchunkFloorIndex=[]\nfor i in range(0,len(chunkFloors)):\n index=[]\n for j in range(0,len(chunkFloors[i])):\n index.append(np.argwhere(z==chunkFloors[i][j]))\n chunkFloorIndex.append(index)\n\nchunkFloors_z0=[chunkFloors[i][0] for i in range(0,len(chunkFloors))]\nchunkFloors_z1=[chunkFloors[i][-1] for i in range(0,len(chunkFloors))]\nchunkFloors_index0=[chunkFloorIndex[i][0] for i in range(0,len(chunkFloorIndex))]\nchunkFloors_index1=[chunkFloorIndex[i][-1] for i in range(0,len(chunkFloorIndex))]\n\npFBFloors=[]\npFBFloorsMean=[]\nfor i in range(0,len(pFB)):\n temp=[]\n temp2=[]\n for j in range(0,int(nFloors)):\n temp.append(pFB[i][int(chunkFloors_index0[j]):int(chunkFloors_index1[j]),:])\n temp2.append(np.mean(pFB[i][int(chunkFloors_index0[j]):int(chunkFloors_index1[j]),:]))\n pFBFloors.append(temp)\n pFBFloorsMean.append(temp2)\n\npFBFloorsMean=np.transpose(np.array(pFBFloorsMean))\nFxFloorsMean=pFBFloorsMean*np.diff(z)[0]*np.diff(xFB)[0]*rho_inf\n\nzFloors=np.arange(0,nFloors,1)\n\n\n#%%Plot fx over floors\nfloorPath= rootPath + '_floorPlots'\nif not os.path.exists(floorPath):\n os.makedirs(floorPath)\n\n\nzFloors,FxFloor=createFloorData(heightFloor,z,pFB)\n\nFxFloorsMax=np.array([np.max(FxFloor[i,:]) for i in range(0,FxFloor.shape[0])])\nFxFloorsMin=np.array([np.min(FxFloor[i,:]) for i in range(0,FxFloor.shape[0])])\nFxFloorsMean=np.array([np.mean(FxFloor[i,:]) for i in range(0,FxFloor.shape[0])])\n\n# latexify()\n# fig1, (ax1) = plt.subplots(1)\n# axlist=[ax1] \n# ax1.plot(FxFloorsMax/1000,zFloors,label=r'$F_{x_{max}}$')\n# ax1.plot(FxFloorsMean/1000,zFloors,label=r'$F_{x_{mean}}$')\n# ax1.plot(FxFloorsMin/1000,zFloors,label=r'$F_{x_{min}}$')\n# ax1.fill_betweenx(zFloors, FxFloorsMin/1000, FxFloorsMax/1000, facecolor='grey',alpha=0.2,interpolate=True)\n# ax1.set_xlabel(r'$F_{x_i}\\,[kN]$')\n# ax1.set_ylabel(r'$Floors\\,[-]$')\n# plt.legend()\n# savefig(floorPath + '/_floorPlots' +'fx_z_floors')\n\n#%% #%%Plot fy over floors\n\nzFloors,FyFloor=createFloorData(heightFloor,z,pSS)\nFyFloorsMax=np.array([np.max(FyFloor[i,:]) for i in 
range(0,FyFloor.shape[0])])\nFyFloorsMin=np.array([np.min(FyFloor[i,:]) for i in range(0,FyFloor.shape[0])])\nFyFloorsMean=np.array([np.mean(FyFloor[i,:]) for i in range(0,FyFloor.shape[0])])\n\n# latexify()\n# fig1, (ax1) = plt.subplots(1)\n# axlist=[ax1] \n# ax1.plot(FyFloorsMax/1000,zFloors,label=r'$F_{y_{max}}$')\n# ax1.plot(FyFloorsMean/1000,zFloors,label=r'$F_{y_{mean}}$')\n# ax1.plot(FyFloorsMin/1000,zFloors,label=r'$F_{y_{min}}$')\n# ax1.fill_betweenx(zFloors, FyFloorsMin/1000, FyFloorsMax/1000, facecolor='grey',alpha=0.2,interpolate=True)\n# ax1.set_xlabel(r'$F_{y_i}\\,[kN]$')\n# ax1.set_ylabel(r'$Floors\\,[-]$')\n# plt.legend()\n# savefig(floorPath + '/_floorPlots' +'fy_z_floors')\n\n\n#%%Calculate Spectra Fx\nfftX=[]\nmax_f_over_heightX = []\nfor i in range(0,nFloors):\n    dT=np.diff(time)[0]\n    f,S= calculateSpectra(FxFloor[i,:], dT)\n    max_f_over_heightX.append(f[np.argmax(S)])\n    fftX.append(np.transpose(np.array([f,S])))\n\n\n\n\nf=np.array([fftX[i][:,0] for i in range(0,len(fftX))]).T\nS=np.array([fftX[i][:,1] for i in range(0,len(fftX))]).T\n\nmaxIndex=np.array(np.where(S==np.max(S)))\nfmax=f[maxIndex[0],maxIndex[1]]\nSmax=S[maxIndex[0],maxIndex[1]]\n\nlatexify()\nfig1, (ax1) = plt.subplots(1)\naxlist=[ax1]\nfor i in range(0,nFloors): \n    ax1.plot(f[:,i],S[:,i],color='grey',alpha=0.25)\n\nax1.plot(f[:,maxIndex[1]],S[:,maxIndex[1]],color=new_colors[0],label=r'$max\\{f,S,floor\\}=\\{'+str(fmax)+','+str(Smax)+','+str(maxIndex[1])+'\\}$')\nax1.set_xlabel(r'$f_{x_i}\\,[Hz]$')\nax1.set_ylabel(r'$S\\,[-]$')\nax1.set_xscale('log')\nax1.set_yscale('log')\nplt.legend()\n# savefig(floorPath + '/_floorPlots' +'fftx_Ampl')\n\n\n\n\n#%%Calculate Spectra Fy\nfftY=[]\n\nmax_f_over_heightY = []\nfor i in range(0,nFloors):\n    dT=np.diff(time)[0]\n    f,S= calculateSpectra(FyFloor[i,:], dT)\n    max_f_over_heightY.append(f[np.argmax(S)])\n    fftY.append(np.transpose(np.array([f,S])))\n\nf=np.array([fftY[i][:,0] for i in range(0,len(fftY))]).T\nS=np.array([fftY[i][:,1] for i in range(0,len(fftY))]).T\n\nmaxIndex=np.array(np.where(S==np.max(S)))\nfmax=f[maxIndex[0],maxIndex[1]]\nSmax=S[maxIndex[0],maxIndex[1]]\n\nmax_f_floor = list(range(1, nFloors + 1)) # floor indices (replaces a hard-coded 1..40 loop)\n\nlatexify()\nfig1, (ax1) = plt.subplots(1)\naxlist=[ax1]\nfor i in range(0,nFloors): \n    ax1.plot(f[:,i],S[:,i],color='grey',alpha=0.25)\n\nax1.plot(f[:,maxIndex[1]],S[:,maxIndex[1]],color=new_colors[1],label=r'$max\\{f,S,floor\\}=\\{'+str(fmax)+','+str(Smax)+','+str(maxIndex[1])+'\\}$')\nax1.set_xlabel(r'$f_{y_i}\\,[Hz]$')\nax1.set_ylabel(r'$S\\,[-]$')\nax1.set_xscale('log')\nax1.set_yscale('log')\nplt.legend()\n# savefig(floorPath + '/_floorPlots' +'ffty_Ampl')\n\n\n\n\n\nlatexify()\nfig1, (ax1) = plt.subplots(1)\nax1.plot(max_f_over_heightX,max_f_floor,color='grey',alpha=0.25)\nsavefig(floorPath + '/_floorPlots' +'floor_maxf')\n\n","repo_name":"BeneStrahm/cfdPostProcessing","sub_path":"tFFT.py","file_name":"tFFT.py","file_ext":"py","file_size_in_byte":17270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"26142160483","text":"import random\nimport select_sort\n\n\ndef ran(num):\n    random_list = []\n    for i in range(0, num):\n        random_list.append(int(random.random() * 100))\n    return random_list\n\n\ndef search(list_to_search, target, left, right):\n    # binary search over an ascending list; `right` is exclusive, so an empty\n    # range means the target is absent (the bound check must come before\n    # indexing `middle`)\n    if left >= right:\n        return None\n    middle = (left + right) // 2\n    middle_num = list_to_search[middle]\n    if target == middle_num:\n        return middle\n    elif target > middle_num:\n        return search(list_to_search, target, middle + 1, 
right)\n    elif target < middle_num:\n        return search(list_to_search, target, left, middle)\n\n\nlist1 = ran(100)\nsorted_list = select_sort.sort(list1)\nsorted_list.reverse()\n# right bound is exclusive, so pass len() rather than len()-1; note that\n# search() assumes ascending order, so this relies on select_sort.sort\n# returning a descending list that reverse() makes ascending\nresult = search(sorted_list, 78, 0, len(sorted_list))\nprint(result)\n\n\ndef greatest_common_divider(x, y):\n    if x == y:\n        return x\n    elif x > y:\n        n = y\n    else:\n        n = x\n    while n > 0:\n        if x % n == 0:\n            if y % n == 0:\n                return n\n        n -= 1\n\n\nprint(greatest_common_divider(81, 27))\nprint(greatest_common_divider(6, 9))\nprint(greatest_common_divider(1680, 640))\n","repo_name":"Zhaoxizhen/MyPythonWorkplace","sub_path":"define_function.py","file_name":"define_function.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"12634451910","text":"from operator import itemgetter\nfrom pathlib import Path\n\nimport parse\nfrom tqdm import trange\n\n\ndef sensor_coverage_at_distance(sensors, target_y):\n    ex_ranges = []\n    for (sensx, sensy), (beacx, beacy) in sensors:\n        dist = abs(sensx - beacx) + abs(sensy - beacy)\n        dist_at_ty = dist - abs(target_y - sensy)\n        if dist_at_ty > 0:\n            ex_ranges.append((sensx-dist_at_ty, sensx+dist_at_ty))\n    ex_ranges.sort(key=itemgetter(0))\n    return ex_ranges\n\n\ndef merge_ranges(ex_ranges):\n    idx = 0\n    while idx < len(ex_ranges)-1:\n        r1start, r1end = ex_ranges[idx]\n        r2start, r2end = ex_ranges[idx+1]\n        if r2end <= r1end:\n            del ex_ranges[idx+1]\n        elif r2start <= r1end < r2end or r2start == (r1end+1):\n            ex_ranges[idx] = (r1start, r2end)\n            del ex_ranges[idx+1]\n        else:\n            idx += 1\n    return ex_ranges\n\n\ndef a(sensors, target_y):\n    \"\"\"Solve day 15 part 1\"\"\"\n    ex_ranges = sensor_coverage_at_distance(sensors, target_y)\n    ex_ranges = merge_ranges(ex_ranges)\n    return sum(end-start for start, end in ex_ranges)\n\n\ndef b(sensors, half_size):\n    \"\"\"Solve day 15 part 2\"\"\"\n    for target_y in trange(2*half_size + 1):\n        ex_ranges = sensor_coverage_at_distance(sensors, target_y)\n        ex_ranges = merge_ranges(ex_ranges)\n\n        if len(ex_ranges) != 1:\n            (_, r1end), (r2start, _) = ex_ranges\n            return ((r1end+r2start)//2 * 4_000_000) + target_y\n\n\ndef parse_file(f: Path):\n    \"\"\"Parse the input file into relevant data structure\"\"\"\n    lines = f.read_text().splitlines()\n    template = parse.compile(\"Sensor at x={:d}, y={:d}: closest beacon is at x={:d}, y={:d}\")\n    sensors = []\n    for line in lines:\n        sensx, sensy, beacx, beacy = template.parse(line)\n        sensors.append(((sensx, sensy), (beacx, beacy)))\n\n    return sensors\n\n\ndef main():\n    \"\"\"Main function to wrap variables\"\"\"\n    files = [\n        ('input15-test1.txt', 10),\n        ('input15.txt', 2_000_000),\n    ]\n    for filename, target_y in files:\n        print(filename)\n        data = parse_file(Path(filename))\n\n        print(f'A: {a(data, target_y)}')\n        print(f'B: {b(data, half_size=target_y)}')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"sjvrijn/AdventofCode","sub_path":"Sander/2022/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"70111118162","text":"import re\nfrom typing import List\n\nimport rake_nltk\nfrom rake_nltk import Metric\nimport nltk\ndef find_deg(l, x):\n    t = len(l)\n    for i in l:\n        t += i.count(\" \")\n    q = t\n    for i, e in enumerate(l):\n        # escape the word so regex metacharacters in it do not break the pattern\n        if re.search(fr'\\b({re.escape(x)})\\b', e, re.I):\n            return (q - (e.count(\" \"))/(2*t)) / t\n        q -= 1 + e.count(\" \")\n    return 0.0\n\ndef find_freq(l,x):\n    if x in l:\n        return 1-l.index(x)/len(l)\n    
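# words RAKE did not rank at all fall back to a flat mid-low score\n    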
return 0.25\n\ngood=['FW', 'JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NPS', 'RB', 'RBR', 'RBS', 'RP', 'VB', 'VBD', 'VBG', 'VBN', 'VBZ', 'WP', 'WRB']\ndef get_important(splitted:List[str]):\n print(\"split\",splitted)\n freq_r = rake_nltk.Rake(max_length=3, ranking_metric=Metric.WORD_FREQUENCY)\n freq_r.extract_keywords_from_text(\" and \".join(splitted))\n freq_phrases = freq_r.get_ranked_phrases()\n\n deg_r = rake_nltk.Rake(max_length=3, ranking_metric=Metric.WORD_DEGREE)\n deg_r.extract_keywords_from_text(\" \".join(splitted))\n deg_phrases=deg_r.get_ranked_phrases()\n\n freq_data={i:find_freq(freq_phrases,i.lower()) for i in splitted}\n deg_data={i: find_deg(deg_phrases,i.lower()) for i in splitted}\n data={}\n for k,freq_v in freq_data.items():\n deg_v=deg_data[k]\n data[k]=freq_v+deg_v\n part_of_speech=nltk.pos_tag(splitted)\n print(part_of_speech)\n for i,part in part_of_speech:\n if part not in good:\n data[i]*=0.5\n if i.lower() in ['thing','things', 'yeah', 'yes', 'something','actually','really', 'life', 'lives', 'lot', 'ok', 'oh', 'well', 'stuff', 'talking', 'look', 'looking', 'talking',\n 'isn\\'t', 'that\\'s', 'right']:\n data[i]*=0.1\n \"\"\"\nFW foreign word\nJJ adjective 'big'\nJJR adjective, comparative 'bigger'\nJJS adjective, superlative 'biggest'\nNN noun, singular 'desk'\nNNS noun plural 'desks'\nNNP proper noun, singular 'Harrison'\nNNPS proper noun, plural 'Americans'\nRB adverb very, silently,\nRBR adverb, comparative better\nRBS adverb, superlative best\nRP particle give up\nVB verb, base form take\nVBD verb, past tense took\nVBG verb, gerund/present participle taking\nVBN verb, past participle taken\nVBZ verb, 3rd person sing. present takes\nWP wh-pronoun who, what\nWRB wh-abverb where, when\n\"\"\"\n # print(\"deg_phrases\",deg_phrases)\n # print(\"deg_data\",deg_data)\n # print(\"freq_phrases\",freq_phrases)\n # print(\"freq_data\",freq_data)\n return [j[0] for j in sorted(data.items(),key=lambda i:-i[1])[:3]]","repo_name":"dkter/ignitify","sub_path":"src/text_analysis.py","file_name":"text_analysis.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"70970862482","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pytangle\",\n version=\"0.0.3\",\n author=\"Mattia Samory\",\n author_email=\"mattia.samory@gmail.com\",\n description=\"A python wrapper for crowdtangle API endpoints\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/hide-ous/pytangle/\",\n download_url=\"https://github.com/hide-ous/pytangle/archive/v0.0.3.tar.gz\",\n keywords=['crowdtangle', 'api', 'wrapper'],\n packages=setuptools.find_packages(),\n install_requires=[\n \"requests>=2.9.1\",\n \"ratelimit>=2.2.1\",\n \"python_dateutil>=2.8.1\",\n ],\n extras_require={'examples': [\"schedule>=0.6.0\"]},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.5',\n)\n","repo_name":"hide-ous/pytangle","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"6655329569","text":"#https://leetcode.com/problems/climbing-stairs/\n\nclass Solution:\n def climbStairs(self, n: int) -> int:\n 
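# Bottom-up DP with the Fibonacci recurrence m[i] = m[i-1] + m[i-2];\n        # e.g. n=3 gives m = [1, 1, 2, 3], i.e. 3 distinct ways.\n        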
m = [0] * (n + 1)\n        m[0] = m[1] = 1\n        for i in range(2, n+1):\n            m[i] = m[i-2]+m[i-1]\n        print(m)  # debug: show the DP table\n        return m[-1]\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.climbStairs(6))","repo_name":"Weizhang2017/leetcode_solutions","sub_path":"climb_stairs.py","file_name":"climb_stairs.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"1725256446","text":"import pygame\nimport random\nfrom enum import Enum\n\n\nclass SlotMachine:\n    def __init__(self):\n        self.results = 3 * [0]\n        self.prev_results = 3 * [0]\n        self.display_spinning = self.prev_results.copy()\n        self.spinning = False\n        self.can_spin = True\n\n    def spin(self):\n        random.seed()\n        # Chance to be woman, black, and in poverty, respectively.\n        chance = [50, 25, 15]\n        # chance = [100, 0, 100]\n\n        # For each reel, choose between two outcomes based on the chance.\n        for reel in range(3):\n            rand_chance = random.randint(0, 100)\n            self.results[reel] = 0 if rand_chance < chance[reel] else 1\n\n    def switch_spin_icons(self):\n        for reel in range(3):\n            # '==' rather than 'is': identity comparison of ints is unreliable\n            self.display_spinning[reel] = 0 if self.display_spinning[reel] == 1 else 1\n\n    def set_prev_results_to_results(self):\n        self.prev_results = self.results.copy()\n\n\nclass Direction(Enum):\n    UP = 1\n    DOWN = 2\n    LEFT = 4\n    RIGHT = 8\n\n\nclass Maze:\n    # Up, Down, Left, Right (row, col) offsets\n    NEIGHBORS = {Direction.UP: [-1, 0],\n                 Direction.DOWN: [1, 0],\n                 Direction.LEFT: [0, -1],\n                 Direction.RIGHT: [0, 1]\n                 }\n\n    DIRECTIONS = [Direction.UP, Direction.DOWN, Direction.LEFT, Direction.RIGHT]\n\n    OPPOSITE_DIRECTION = {Direction.UP: Direction.DOWN,\n                          Direction.DOWN: Direction.UP,\n                          Direction.LEFT: Direction.RIGHT,\n                          Direction.RIGHT: Direction.LEFT\n                          }\n\n    def __init__(self, height, width):\n        self.height = height\n        self.width = width\n        self.max_distance = 0\n\n        random.seed()\n        self.Maze = self.generate_maze()\n        # self.win_sprite_coord = (0, 1)\n        self.win_sprite_coord = self.find_farthest_cell_from_start()\n\n    def generate_maze(self):\n        maze = [[Cell() for j in range(self.width)] for i in range(self.height)]\n        self.generate_rand_maze(0, 0, maze)\n\n        return maze\n\n    def find_farthest_cell_from_start(self):\n        for x in range(self.height):\n            for y in range(self.width):\n                self.Maze[x][y].visited = False\n\n        queue = list()\n        queue.append((0, 0))\n        curr_coord = None\n\n        while len(queue) > 0:\n            curr_coord = queue.pop(0)\n            x = curr_coord[0]\n            y = curr_coord[1]\n            curr = self.Maze[x][y]\n            curr.visited = True\n            for direction in self.DIRECTIONS:\n                if curr.open_walls & direction.value != 0:  # '!=' rather than 'is not' for int comparison\n                    n_x = x + self.NEIGHBORS[direction][0]\n                    n_y = y + self.NEIGHBORS[direction][1]\n\n                    if self.__within_bounds(n_x, n_y) and not self.Maze[n_x][n_y].visited:\n                        queue.append((n_x, n_y))\n\n        return curr_coord\n\n    def generate_rand_maze(self, i, j, maze):\n        if self.__within_bounds(i, j) and not maze[i][j].visited:\n            maze[i][j].visited = True\n            unvisited_dir = self.get_unvisited_neighbors(i, j, maze)\n\n            while len(unvisited_dir) > 0:\n                # Choose a random unvisited direction to break down wall\n                rand_dir = unvisited_dir[random.randint(0, len(unvisited_dir) - 1)]\n                maze[i][j].open_walls |= rand_dir.value\n\n                next_cell_x = i + self.NEIGHBORS[rand_dir][0]\n                next_cell_y = j + self.NEIGHBORS[rand_dir][1]\n\n                opposite = self.OPPOSITE_DIRECTION[rand_dir]\n                maze[next_cell_x][next_cell_y].open_walls |= opposite.value\n\n                self.generate_rand_maze(next_cell_x, next_cell_y, maze)\n\n                unvisited_dir = self.get_unvisited_neighbors(i, j, maze)\n\n    def 
get_unvisited_neighbors(self, i, j, maze):\n        \"\"\" Returns unvisited neighbors \"\"\"\n        unvisited = []\n        for direction in self.DIRECTIONS:\n            neighbor = self.NEIGHBORS[direction]\n            n_x = i + neighbor[0]\n            n_y = j + neighbor[1]\n            if self.__within_bounds(n_x, n_y) and not maze[n_x][n_y].visited:\n                unvisited.append(direction)\n\n        return unvisited\n\n    def __within_bounds(self, i, j):\n        return 0 <= i < self.height and 0 <= j < self.width\n\n    @staticmethod\n    def __mirror_direction(direction):\n        # if Right or Down return Left and Up, otherwise do the opposite.\n        # NOTE: unused legacy helper; OPPOSITE_DIRECTION above is what the\n        # maze generation actually uses.\n        return direction + 2 if direction <= 1 else direction - 2\n\n    def print_maze(self):\n        for y in range(self.height):\n            for x in range(self.width):\n                print(self.Maze[y][x].open_walls, end='\\t')  # tab-separated row (the old trailing comma was a Python 2 idiom)\n            print()\n\n\nclass Cell:\n    def __init__(self):\n        self.visited = False\n        self.open_walls = 0  # bitmap representing whether the cell's wall is open or closed.\n\n\nclass RestockShelfGame:\n    ITEMS = ['condiments', 'steak', 'sushi', 'taco', 'watermelon']\n\n    def __init__(self):\n        self.shelf_order = None\n        self.stock_order = None\n        self.reshuffle_items()\n\n    def reshuffle_items(self):\n        random.shuffle(self.ITEMS)\n        self.shelf_order = self.ITEMS.copy()\n        random.shuffle(self.ITEMS)\n        self.stock_order = self.ITEMS.copy()\n\n\nclass Player:\n    def __init__(self, px, py, offset_x, offset_y):\n        # Save the start position\n        # x and y pixel coordinates\n        self.start_px = self.px = px\n        self.start_py = self.py = py\n        self.offset_x = offset_x\n        self.offset_y = offset_y\n        self.old_px = self.old_py = None\n        self.x = 0\n        self.y = 0\n\n        self.tiles_moved_since_reset = 0\n\n    def reset(self):\n        self.px = self.start_px\n        self.py = self.start_py\n\n    def get_offset_px(self):\n        return self.px + self.offset_x, self.py + self.offset_y\n\n    def store_curr_pos_as_old_pos(self):\n        self.old_px = self.px\n        self.old_py = self.py\n\n    def set_curr_pos_to_old_pos(self):\n        self.px = self.old_px\n        self.py = self.old_py\n        self.tiles_moved_since_reset = 0\n\n    def update_coord(self, x, y):\n        self.x = x\n        self.y = y\n        self.tiles_moved_since_reset += 1\n\nclass Model:\n    TILES_MOVED_TO_RESET = 65\n    TILES_TO_MOVE_BACK = 10\n\n    def __init__(self):\n        self.slot_machine = SlotMachine()\n        self.maze = Maze(14, 30)\n        self.player = None\n        self.shelf_game = RestockShelfGame()\n        self.player_won = False\n        self.prepped = False\n\n    def init_player(self, x, y, offset_x, offset_y):\n        self.player = Player(x, y, offset_x, offset_y)\n\n    def move_player(self, direction, block_size, player_radius):\n        if self.__can_move(direction, block_size, player_radius):\n            if direction is Direction.UP:\n                self.player.py -= 1\n            elif direction is Direction.DOWN:\n                self.player.py += 1\n            elif direction is Direction.LEFT:\n                self.player.px -= 1\n            elif direction is Direction.RIGHT:\n                self.player.px += 1\n\n    def __can_move(self, direction, block_size, player_radius):\n        # Calculate which cells the player occupies\n        square = block_size * 4\n        p1 = (self.player.px, self.player.py)  # Top left corner of player\n        p2 = (p1[0] + square - 1, p1[1] + square - 1)  # Bottom right corner of player\n\n        # Convert to maze index\n        x_p1 = p1[1] // square\n        y_p1 = p1[0] // square\n        x_p2 = p2[1] // square\n        y_p2 = p2[0] // square\n\n        self.__update_player_coord(x_p1, x_p2, y_p1, y_p2)\n        if self.should_prep_for_reset() and not self.prepped:\n            self.player.store_curr_pos_as_old_pos()\n            self.prepped = True\n\n        # 2d array of cells\n        maze = self.maze.Maze\n\n        if self.__player_won(x_p1, x_p2, y_p1, y_p2):\n            self.player_won = True\n        # if direction is Direction.UP:\n        # If 
the sprite is in the cell\n        if x_p1 == x_p2 and y_p1 == y_p2:\n            return maze[x_p1][y_p1].open_walls & direction.value\n        else:\n            dir_coord = self.maze.NEIGHBORS[direction]\n            if direction is Direction.DOWN or direction is Direction.RIGHT:\n                return x_p1 + dir_coord[0] == x_p2 and y_p1 + dir_coord[1] == y_p2\n            else:\n                return x_p2 + dir_coord[0] == x_p1 and y_p2 + dir_coord[1] == y_p1\n\n    def __player_won(self, x_1, x_2, y_1, y_2):\n        win_x = self.maze.win_sprite_coord[0]\n        win_y = self.maze.win_sprite_coord[1]\n        if x_1 == win_x and y_1 == win_y and x_2 == win_x and y_2 == win_y:\n            return True\n\n    def __update_player_coord(self, x_1, x_2, y_1, y_2):\n        if self.player.x != x_1 or self.player.y != y_1:\n            self.player.update_coord(x_1, y_1)\n        # elif self.player.x != x_2 or self.player.y != y_2:\n        #     self.player.update_coord(x_2, y_2)\n\n    def should_prep_for_reset(self):\n        return self.TILES_MOVED_TO_RESET <= self.player.tiles_moved_since_reset\n\n    def should_reset(self):\n        return self.TILES_MOVED_TO_RESET + self.TILES_TO_MOVE_BACK <= self.player.tiles_moved_since_reset\n\n    def reset_player(self):\n        self.player.set_curr_pos_to_old_pos()\n        self.prepped = False\n\n\n","repo_name":"alextheastronaut/life_game","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":9251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"73655819600","text":"# import libraries\r\nimport datetime\r\nimport warnings\r\nimport calendar\r\nimport random\r\nimport wikipedia\r\nimport speech_recognition as sr\r\nimport requests, json\r\nimport pyttsx3\r\nimport nltk, re, pprint\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import WordNetLemmatizer\r\nfrom nltk.stem import SnowballStemmer\r\nimport pywhatkit\r\n# nltk.download('words') # Uncomment this line so it gets downloaded\r\n# nltk.download('punkt')\r\n# nltk.download('stopwords')\r\n# nltk.download('averaged_perceptron_tagger')\r\n# nltk.download('maxent_ne_chunker')\r\n# nltk.download('wordnet')\r\nfrom weather_request import WeatherRequest\r\nimport webbrowser as web\r\nimport requests\r\nfrom lxml import html\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\ncommands_key_words = {\"WEATHER\": [\"temperature\", \"raining\", \"weather\", \"snowing\"], \"YOUTUBE\": [\"play\", \"youtube\"],\r\n                      \"GOOGLE\": [\"search\", \"google\", \"Google\"], \"WIKIPEDIA\": [\"look\", \"summarize\", \"for\", \"Wikipedia\", \"wikipedia\"],\r\n                      \"LOCATION\": [\"location\", \"what\"],\r\n                      \"HOUR\": [\"hour\", \"time\"]}\r\nweatherGrammar = r\"\"\"\r\n    WEATHER: {?+}\r\n    {+}\r\n\"\"\"\r\nyoutubeGrammar = r\"\"\"\r\n    YOUTUBE: {<.*>*}\r\n    {<.*>*}\r\n    \r\n\"\"\"\r\n\r\n# Search for something on Google\r\ngoogleGrammar = r\"\"\"\r\n    GOOGLE: {<.*>*}\r\n\"\"\"\r\n\r\nwikipediaGrammar = r\"\"\"\r\n    WIKIPEDIA: {<.*>*}\r\n    {<.*>*}\r\n\"\"\"\r\n\r\nlocationGrammar = r\"\"\"\r\n    LOCATION: {<.*>*}\r\n\"\"\"\r\n\r\nhourGrammar = r\"\"\"\r\n    HOUR: {}\r\n\"\"\"\r\n\r\nclass Annie:\r\n    def __init__(self):\r\n        self.foundCommand = False\r\n        self.engine = pyttsx3.init()\r\n        self.__setEngine()\r\n        self.name = 'user'\r\n        self.commands = {\"WEATHER\": self.weather, \"YOUTUBE\": self.youtube, \"GOOGLE\": self.google, \"WIKIPEDIA\": self.wikipedia, \"LOCATION\": self.location, \"HOUR\": self.hour}\r\n        self.weather_request = WeatherRequest()\r\n\r\n    # Set the pyttsx3 voice and run the engine\r\n    def __setEngine(self):\r\n        voices = self.engine.getProperty('voices')\r\n        self.engine.setProperty('voice', voices[1].id)\r\n        
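# voices[1] is assumed to exist (a second installed TTS voice); systems\r\n        # with a single voice would need a different index\r\n        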
self.engine.runAndWait()\r\n\r\n # Starts recording audio and returns the phrase the user said\r\n def recordAudio(self):\r\n recognizer = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n recognizer.adjust_for_ambient_noise(source)\r\n self.assistantResponse('Say something')\r\n audio = recognizer.listen(source)\r\n try:\r\n phrase = ''\r\n phrase = recognizer.recognize_google(audio, show_all=False) # show_all=False returns only the most likely transcription\r\n except KeyError:\r\n print('I could not understand')\r\n except sr.UnknownValueError:\r\n print('I could not understand')\r\n except sr.RequestError as e:\r\n print('Request error from google speech recognition: ' + format(e))\r\n return phrase[0].lower() + phrase[1:] if phrase else '' # guard against an empty phrase when recognition failed\r\n\r\n # Play Annie response\r\n def assistantResponse(self, text):\r\n print(text)\r\n self.engine.say(text)\r\n self.engine.runAndWait()\r\n\r\n def youtube(self, chunk, keywords):\r\n self.assistantResponse(\"Opening video on Youtube\")\r\n video = []\r\n for word in chunk:\r\n if type(word) is tuple and word[0] not in commands_key_words[\"YOUTUBE\"]:\r\n video.append(word[0])\r\n separator = ' '\r\n pywhatkit.playonyt(separator.join(video))\r\n\r\n def google(self, chunk, keywords):\r\n print(\"Searching on Google\")\r\n toSearch = []\r\n for word in chunk:\r\n if word[0] not in commands_key_words[\"GOOGLE\"]:\r\n toSearch.append(word[0])\r\n separator = ' '\r\n pywhatkit.search(separator.join(toSearch))\r\n\r\n def googleDefault(self, phrase):\r\n pywhatkit.search(phrase)\r\n\r\n def wikipedia(self, chunk, keywords):\r\n wikipedia.set_lang(\"en\")\r\n print(\"Searching on Wikipedia\")\r\n # url = 'https://es.wikipedia.org/wiki/' # unused leftover; set_lang(\"en\") above targets the English wiki\r\n toSearch = []\r\n for word in chunk:\r\n if word[0] not in commands_key_words[\"WIKIPEDIA\"]:\r\n toSearch.append(word[0])\r\n newsearch = \" \".join(toSearch)\r\n listofelements = wikipedia.search(newsearch)\r\n page = wikipedia.page(listofelements[0]).url\r\n web.open(page)\r\n\r\n def location(self, chunk, keywords):\r\n location = []\r\n for word in chunk:\r\n if word[0] not in commands_key_words[\"LOCATION\"]:\r\n location.append(word[0])\r\n finalLocationsString = \" \".join(location)\r\n url = 'https://google.es/maps/place/' + finalLocationsString + '/&'\r\n web.open(url)\r\n\r\n # Access the TimeZone API and request the hour in a location\r\n def hour(self, chunk, keywords):\r\n country = ''\r\n for word in chunk.subtrees(filter=lambda tagged: tagged.label() == 'GPE'):\r\n for location in word:\r\n country = location[0]\r\n page_content = requests.get(\r\n 'https://www.timeanddate.com/worldclock/' + country\r\n )\r\n tree = html.fromstring(page_content.content)\r\n variable = tree.xpath(\"/html/body/main/article/section[1]/div[1]/div/span[1]/text()\")\r\n self.assistantResponse(\" \".join(variable).replace(':', ' '))\r\n\r\n # Access weather API and request the weather for a location\r\n def weather(self, chunk, keywords):\r\n locations = []\r\n for word in chunk.subtrees(filter=lambda t: t.label() == 'GPE'):\r\n for location in word:\r\n locations.append(location[0])\r\n\r\n for location in locations:\r\n self.assistantResponse(self.weather_request.getWeather(location))\r\n\r\n\r\n # Searches the tagged tree for subtrees carrying the given label; if their\r\n # tokens match the command's key words, the corresponding command handler\r\n # is called\r\n def checkChunks(self, tagged_tree, label, pos_keywords):\r\n if not self.foundCommand:\r\n for subtree in tagged_tree.subtrees(filter=lambda t: t.label() == label):\r\n keywords = []\r\n 
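# A chunked tree mixes plain (token, POS) leaves with nested subtrees (e.g. GPE chunks\r\n # produced by ne_chunk), so the keyword matching below descends one extra level for the latter.\r\n 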
print(subtree)\r\n for word in subtree:\r\n if type(word) is tuple:\r\n if word[1] in pos_keywords and word[0].lower() in commands_key_words[label]:\r\n keywords.append(word[0])\r\n else:\r\n for sub_chunk_word in word:\r\n if sub_chunk_word[1] in pos_keywords and sub_chunk_word[0].lower() in commands_key_words[label]:\r\n keywords.append(sub_chunk_word[0])\r\n\r\n if keywords:\r\n self.commands[label](subtree, keywords)\r\n self.foundCommand = True\r\n\r\n def parseInput(self, phrase):\r\n self.foundCommand = False\r\n clean_tagged = self.tokenize(phrase)\r\n print(clean_tagged)\r\n weather_chunked = self.__chunk(self.ne_chunk(clean_tagged), weatherGrammar)\r\n self.checkChunks(weather_chunked, 'WEATHER', ['NN', 'VBG'])\r\n youtube_chunked = self.__chunk(clean_tagged, youtubeGrammar)\r\n self.checkChunks(youtube_chunked, 'YOUTUBE', ['NN', 'NNP', 'VB'])\r\n google_chunked = self.__chunk(clean_tagged, googleGrammar)\r\n self.checkChunks(google_chunked, 'GOOGLE', ['NN', 'NNP', 'NNP'])\r\n wiki_chunked = self.__chunk(clean_tagged, wikipediaGrammar)\r\n self.checkChunks(wiki_chunked, 'WIKIPEDIA', ['NN', 'NNP', 'VB'])\r\n location_chunked = self.__chunk(clean_tagged, locationGrammar)\r\n self.checkChunks(location_chunked, 'LOCATION', ['NN', 'NNP'])\r\n hour_chunked = self.__chunk(self.ne_chunk(clean_tagged), hourGrammar)\r\n self.checkChunks(hour_chunked, 'HOUR', ['NN'])\r\n if not self.foundCommand:\r\n if phrase == \"are you okay\" or phrase == \"annie are you okay\":\r\n self.assistantResponse(\"I've been hit by, I've been struck by, a smooth criminal\")\r\n else:\r\n self.googleDefault(phrase)\r\n\r\n # We remove the stopwords from the sentence\r\n def __cleanInput(self, tokens):\r\n stop_words = set(stopwords.words('english'))\r\n return [w for w in tokens if w not in stop_words]\r\n\r\n # We obtain the lemma of each word\r\n def __lemmatisation(self, tokens):\r\n lemma = WordNetLemmatizer()\r\n return [lemma.lemmatize(w) for w in tokens]\r\n\r\n def __chunk(self, tokens, grammar):\r\n cp = nltk.RegexpParser(grammar)\r\n return cp.parse(tokens)\r\n\r\n def filt(self, x, chunk_word):\r\n return x.label() == chunk_word\r\n\r\n def ne_chunk(self, tagged):\r\n return nltk.ne_chunk(tagged)\r\n\r\n def tokenize(self, phrase):\r\n tokens = nltk.word_tokenize(phrase)\r\n clean_tokens = self.__cleanInput(tokens)\r\n lemmatisation = self.__lemmatisation(clean_tokens)\r\n tagged = nltk.pos_tag(lemmatisation) # tag each token by part of speech (verb, noun, ...)\r\n return tagged # the caller can then look for proper nouns, e.g. city names\r\n\r\n","repo_name":"alu0101030531/AnnieVirtualAssistant","sub_path":"code/annie.py","file_name":"annie.py","file_ext":"py","file_size_in_byte":9146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27057508213","text":"\"\"\"Summary statistics are displayed on the summary tiles of the strategies.\"\"\"\nimport datetime\nfrom time import perf_counter\nfrom typing import Optional\nimport logging\n\nimport pandas as pd\n\nfrom tradeexecutor.state.state import State\nfrom tradeexecutor.statistics.key_metric import calculate_key_metrics\nfrom tradeexecutor.strategy.execution_context import ExecutionMode\nfrom tradeexecutor.strategy.summary import StrategySummaryStatistics\nfrom tradeexecutor.visual.equity_curve import calculate_compounding_realised_trading_profitability\nfrom tradeexecutor.visual.web_chart import export_time_series\n\nlogger = logging.getLogger(__name__)\n\n\ndef calculate_summary_statistics(\n state: State,\n 
execution_mode: ExecutionMode = ExecutionMode.one_off,\n time_window = pd.Timedelta(days=90),\n now_: Optional[pd.Timestamp | datetime.datetime] = None,\n legacy_workarounds=False,\n backtested_state: State | None = None,\n key_metrics_backtest_cut_off = datetime.timedelta(days=90),\n) -> StrategySummaryStatistics:\n \"\"\"Preprocess the strategy statistics for the summary card in the web frontend.\n\n TODO: Rename to `calculate_strategy_preview_statistics()`.\n\n To test out in the :ref:`console`:\n\n .. code-block:: python\n\n from tradeexecutor.statistics.summary import calculate_summary_statistics\n from tradeexecutor.strategy.execution_context import ExecutionMode\n\n calculate_summary_statistics(state, ExecutionMode.preflight_check)\n\n :param state:\n Strategy state from which we calculate the summary\n\n :param execution_mode:\n If we need to skip calculations during backtesting.\n\n :param time_window:\n How long we look back for the summary statistics\n\n :param now_:\n Override current time for unit testing.\n\n Set this to the date of the last trade.\n\n :param legacy_workarounds:\n Skip some calculations on old data, because data is missing.\n\n :param backtested_state:\n The result of the earlier backtest run.\n\n The live web server needs to show backtested metrics on the side of\n live trading metrics. This state is used to calculate them.\n\n :param key_metrics_backtest_cut_off:\n How many days of live data are collected before key metrics switch from backtest-based to live-trading based.\n\n :return:\n Summary calculations for the summary tile,\n or an empty `StrategySummaryStatistics` if they cannot be calculated.\n \"\"\"\n\n logger.info(\"calculate_summary_statistics() for %s\", state.name)\n func_started_at = perf_counter()\n\n portfolio = state.portfolio\n\n # We can always get the current value even if there are no trades\n current_value = portfolio.get_total_equity()\n\n strategy_start, strategy_end = state.get_strategy_time_range()\n\n first_trade, last_trade = portfolio.get_first_and_last_executed_trade()\n\n first_trade_at = first_trade.executed_at if first_trade else None\n last_trade_at = last_trade.executed_at if last_trade else None\n\n if not now_:\n now_ = pd.Timestamp.utcnow().tz_localize(None)\n\n start_at = now_ - time_window\n if strategy_start and strategy_end:\n age = strategy_end - strategy_start\n else:\n age = None\n\n stats = state.stats\n\n profitability_90_days = None\n enough_data = False\n performance_chart_90_days = None\n returns_all_time = returns_annualised = None\n\n if len(stats.portfolio) > 0 and not legacy_workarounds:\n profitability = calculate_compounding_realised_trading_profitability(state)\n enough_data = len(profitability.index) > 1 and profitability.index[0] <= start_at\n profitability_time_windowed = profitability[start_at:]\n if len(profitability_time_windowed) > 0:\n profitability_daily = profitability_time_windowed.resample(pd.offsets.Day()).max()\n # We do not generate an entry for dates without trades, so forward fill from the previous day\n profitability_daily = profitability_daily.ffill()\n profitability_90_days = profitability_daily.iloc[-1]\n performance_chart_90_days = export_time_series(profitability_daily)\n returns_all_time = profitability.iloc[-1]\n else:\n profitability_90_days = None\n performance_chart_90_days = None\n\n if age and returns_all_time:\n returns_annualised = returns_all_time * datetime.timedelta(days=365) / age\n\n key_metrics = {m.kind.value: m for m in calculate_key_metrics(state, backtested_state, 
required_history=key_metrics_backtest_cut_off)}\n\n logger.info(\"calculate_summary_statistics() finished, took %s seconds\", perf_counter() - func_started_at)\n\n return StrategySummaryStatistics(\n first_trade_at=first_trade_at,\n last_trade_at=last_trade_at,\n enough_data=enough_data,\n current_value=current_value,\n profitability_90_days=profitability_90_days,\n performance_chart_90_days=performance_chart_90_days,\n key_metrics=key_metrics,\n launched_at=state.created_at,\n backtest_metrics_cut_off_period=key_metrics_backtest_cut_off,\n return_all_time=returns_all_time,\n return_annualised=returns_annualised,\n )\n","repo_name":"tradingstrategy-ai/trade-executor","sub_path":"tradeexecutor/statistics/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"3"} +{"seq_id":"5774198716","text":"def ImmediateNeighbors(pattern):\n neighbors = set()\n nucleotides = {'A', 'C', 'G', 'T'}\n len_pat=len(pattern)\n for i in range(len_pat):\n for n in nucleotides:\n neighbors.add(pattern[:i]+n+pattern[i+1:])\n return neighbors\n\ndef iterative_neighbors(pattern, d):\n if d == 0:\n return {pattern}\n imneighbor = ImmediateNeighbors(pattern)\n neighbor = imneighbor\n for j in range(d-1):\n for p in imneighbor:\n neighbor = neighbor.union(ImmediateNeighbors(p))\n imneighbor = neighbor\n return neighbor","repo_name":"Neurojedi/Biofunctions","sub_path":"utils/iterative_neighbors.py","file_name":"iterative_neighbors.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"70213609362","text":"import networkx as nx\n\nwith open('input') as f:\n ls = f.readlines()\n\n# Part one\nportals = {}\nfor i in range(len(ls)):\n for j in range(len(ls[i])):\n # Vertical portals\n if ls[i][j].isupper() and i > 0 and ls[i-1][j].isupper():\n if i < len(ls)-1 and ls[i+1][j] == '.':\n portals[i+1, j] = set([ls[i][j], ls[i-1][j]])\n else:\n portals[i-2, j] = set([ls[i][j], ls[i-1][j]])\n # Horizontal portals\n elif ls[i][j].isupper() and j > 0 and ls[i][j-1].isupper():\n if j < len(ls[2])-1 and ls[i][j+1] == '.':\n portals[i, j+1] = set([ls[i][j], ls[i][j-1]])\n else:\n portals[i, j-2] = set([ls[i][j], ls[i][j-1]])\n\nstart = next(k for k, v in portals.items() if v == set(['A']))\nend = next(k for k, v in portals.items() if v == set(['Z']))\n\nG = nx.grid_2d_graph(len(ls), len(ls[0]))\nfor i in range(len(ls)):\n for j in range(len(ls[i])):\n if ls[i][j] != '.':\n G.remove_node((i, j))\n\nfor p1, portal1 in portals.items():\n for p2, portal2 in portals.items():\n if portal1 == portal2 and p1 != p2:\n G.add_edge(p1, p2)\n\nprint(nx.shortest_path_length(G, start, end))\n\n# Part two\nportal_is_outer = {p: p[0] in (2, len(ls)-3) or p[1] in (2, len(ls[2])-4) for p in portals}\n\n# We're lazy and simply assume that the optimal solution uses no more than 100\n# levels; if it does, we can simply increase the number.\nG = nx.Graph()\nfor depth in range(100):\n for i in range(len(ls)):\n for j in range(len(ls[i])):\n if ls[i][j] == '.':\n if ls[i-1][j] == '.':\n G.add_edge((i, j, depth), (i-1, j, depth))\n if ls[i][j-1] == '.':\n G.add_edge((i, j, depth), (i, j-1, depth))\n if (i, j) in portals:\n for (i2, j2), portal in portals.items():\n if portals[i, j] == portal and (i2, j2) != (i, j):\n if not portal_is_outer[i, j]:\n G.add_edge((i, j, depth), (i2, j2, depth+1))\n\nprint(nx.shortest_path_length(G, (*start, 0), (*end, 
0)))\n","repo_name":"fuglede/adventofcode","sub_path":"2019/day20/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"3"} +{"seq_id":"32188718109","text":"from flask import Flask, render_template, request, redirect\nfrom flask_mail import Mail, Message\nimport os\napp = Flask(__name__)\n\n\napp.config['DEBUG'] = False\napp.config['MAIL_SERVER'] = 'smtp.googlemail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')\napp.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD')\n\nmail = Mail(app)\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/formSent\", methods=['GET','POST'])\ndef formSent():\n #if receives get method, just redirect to \"/\"\n if request.method == 'GET':\n pass\n #Else send the msg to mi inbox\n else:\n name = request.form.get(\"name\")\n email = request.form.get(\"email\")\n content = request.form.get(\"message\")\n print(content)\n msg = Message(subject=\"Mensaje desde akdemia.es\",body=content + \" - remitente: \"+email ,sender=\"andreskammerath@gmail.com\",\n recipients=[app.config['MAIL_USERNAME'],\"torres.luciana1941@gmail.com\"])\n msg2 = Message(subject=\"Correo recibido en akdemia.es\",body=\"Gracias por escribirnos. Muy pronto nos contactaremos.\",sender=\"andreskammerath@gmail.com\",\n recipients=[email])\n mail.send(msg)\n mail.send(msg2)\n return redirect(\"/\")\n\n@app.route(\"/services\")\ndef services():\n return render_template(\"services.html\")\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"Andreskammerath/akdmia","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25147000203","text":"import nidcpower\n\nwith nidcpower.Session(resource_name=\"PXI1Slot1\", reset=True, options={}) as session:\n session.source_mode = nidcpower.SourceMode.SINGLE_POINT\n session.output_function = nidcpower.OutputFunction.DC_VOLTAGE\n session.voltage_level = 2\n session.current_limit = 0.01\n session.voltage_level_range = 10\n session.current_limit_range = 0.01\n session.source_delay = 0.05\n session.measure_when = nidcpower.MeasureWhen.AUTOMATICALLY_AFTER_SOURCE_COMPLETE\n\n session.commit()\n\n with session.initiate():\n measurements1 = session.fetch_multiple(count=1, timeout=1.0)\n session.voltage_level = 4\n measurements2 = session.fetch_multiple(count=1, timeout=1.0)\n print(f\"Measurements 1: \\n- Voltage: {measurements1[0][0]}\\n- Current: {measurements1[0][1]}\\n- In Compliance: {measurements1[0][2]}\")\n print(f\"Measurements 2: \\n- Voltage: {measurements2[0][0]}\\n- Current: {measurements2[0][1]}\\n- In Compliance: {measurements2[0][2]}\")","repo_name":"Seralfesp/nidriver-python-examples","sub_path":"src/nidcpower/NI-DCPower Hardware-Timed Single Point.py","file_name":"NI-DCPower Hardware-Timed Single Point.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37621576612","text":"'''\ncredit to https://github.com/chao1224/BioChemGNN_Dense/blob/master/src/models/DMPNN.py\ncredit to 
https://github.com/chao1224/BioChemGNN/blob/main/BioChemGNN/models/DMPNN.py\n'''\nfrom collections import *\nfrom re import L\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch_scatter import scatter_add\nfrom ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder\n\n\ndef get_revert_edge_index(num_edge):\n \"\"\"\n Corresponding to this line: https://github.com/chao1224/3D_Benchmark_dev/blob/main/Geom3D/datasets/datasets_utils.py#L90-L92\n \"\"\"\n l = []\n for i in range(int(num_edge / 2)):\n l.extend([i*2+1, i*2])\n return l\n\n\nclass DMPNN(nn.Module):\n def __init__(self, num_layer, emb_dim, JK=\"last\", drop_ratio=0, gnn_type=\"gin\"):\n super(DMPNN, self).__init__()\n self.num_layer = num_layer\n self.drop_ratio = drop_ratio\n self.JK = JK\n\n if self.num_layer < 2:\n raise ValueError(\"Number of GNN layers must be greater than 1.\")\n\n self.atom_encoder = AtomEncoder(emb_dim)\n self.bond_encoder = BondEncoder(emb_dim)\n \n self.W_input = nn.Linear(emb_dim*2, emb_dim, bias=False)\n self.W_hidden = nn.Linear(emb_dim, emb_dim, bias=False)\n self.W_output = nn.Linear(emb_dim*2, emb_dim)\n\n ###List of batchnorms\n self.batch_norms = nn.ModuleList()\n for _ in range(num_layer):\n self.batch_norms.append(nn.BatchNorm1d(emb_dim))\n\n def forward(self, *argv):\n if len(argv) == 3:\n x, edge_index, edge_attr = argv[0], argv[1], argv[2]\n elif len(argv) == 1:\n data = argv[0]\n x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr\n else:\n raise ValueError(\"unmatched number of arguments.\")\n\n x = self.atom_encoder(x)\n edge_attr = self.bond_encoder(edge_attr)\n\n b_from_a = out_node_index = edge_index[0]\n b_to_a = in_node_index = edge_index[1]\n message = torch.cat([x[b_to_a], edge_attr], dim=-1)\n message = self.W_input(message)\n\n num_nodes = len(x)\n num_edges = len(b_from_a)\n\n reverse_edge_index = torch.LongTensor(get_revert_edge_index(num_edges))\n\n for i in range(self.num_layer - 1):\n node_message = scatter_add(message, in_node_index, dim=0, dim_size=num_nodes)\n rev_edge_message = message[reverse_edge_index]\n message = node_message[b_from_a] - rev_edge_message\n message = self.W_hidden(message)\n message = self.batch_norms[i](message)\n message = F.dropout(F.relu(message), self.drop_ratio, training=self.training)\n\n node_message = scatter_add(message, in_node_index, dim=0, dim_size=num_nodes)\n node_representation = torch.cat([x, node_message], dim=1)\n node_representation = F.relu(self.W_output(node_representation))\n\n return node_representation\n","repo_name":"chao1224/Geom3D","sub_path":"Geom3D/models/DMPNN.py","file_name":"DMPNN.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"3"} +{"seq_id":"40370202631","text":"import threading\nimport time\n\n# File types that can be handled by this backend (python regexp format)\nregexps = [\".*\"]\n\nclass Player(object):\n def __init__(self, regexp, filename, subtune, loops):\n self.filename = filename\n self.stopped = False\n self.cmds = []\n\n def play(self):\n print(\"Playing\")\n self.thread = threading.Thread(target=self._work)\n self.thread.start()\n self.thread.join()\n self.proc = None\n self.stopped = True\n\n def pause(self):\n print(\"Pause\")\n self.cmds.append('pause')\n\n def resume(self):\n print(\"Unpause\")\n self.cmds.append('unpause')\n\n def abort(self):\n print(\"Stop\")\n self.cmds.append('stop')\n\n def _work(self, length=60):\n left = length\n state = 
'playing'\n while True:\n while self.cmds:\n cmd = self.cmds.pop(0)\n\n if cmd == 'pause':\n state = 'paused'\n elif cmd == 'unpause':\n if state == 'paused':\n state = 'playing'\n elif cmd == 'stop':\n state = 'stopped'\n break\n\n if state == 'playing':\n left -= 1\n print(\"Playing, %d seconds left\" % (left,))\n if left <= 0:\n break\n elif state == 'stopped':\n break\n time.sleep(1)\n\n self.stopped = True\n\ndef get_player(regexp, filename, subtune, loops):\n return Player(regexp, filename, subtune, loops)\n","repo_name":"Jolmberg/kakrafoon","sub_path":"server/backends/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5483622139","text":"import time\nimport os\nimport collections\nimport concurrent.futures\nfrom pprint import pprint\nfrom functools import reduce\nscientist = collections.namedtuple('scientist', ['name','nobel','born'])\nada = scientist(name=\"ada lovelace\",nobel=True,born=3443)\nrichard=scientist(name=\"feynman\",nobel=True,born=1934)\nscs = [ada, richard, scientist(name=\"Tu you\",nobel=True,born=\"1983\"), scientist(name=\"Asd\",born=\"1945\",nobel=False),scientist(name=\"lewin\",born=1978,nobel=False),scientist(name=\"Aqwer\",born=1849,nobel=True), scientist(name=\"Loop\",born=8972,nobel=False)]\n\n#fs = (filter(lambda x: x.nobel is True, scs))\n#for i in fs:\n# print(i)\nn = [3,5,2,8]\nprint(list(map(lambda x : (x**2)>10,n)))\nprint(list(filter(lambda x : (x**2)>10,n)))\nprint((reduce(lambda x,y : (x*y),n)))\n#\n#def f1(a):\n# return lambda x : a+x\n#\n#add = f1(2)\n#\n#print(add(3))\n#print(add(5))\n#\n#def trans(x):\n# print(f'Process {os.getpid()} working record {x.name}')\n# time.sleep(1)\n# result = {'name':x.name, 'age': 2018-x.born}\n# print(f'Process {os.getpid()} done processing record {x.name}')\n# return result\n#start = time.time()\n#with concurrent.futures.ThreadPoolExecutor() as ex:\n# result = ex.map(trans, scs)\n#end = time.time()\n#print(f'\\n Time to complete {end-start:.2f}s\\n')\n#pprint(tuple(result))\n\n# decorator functions\n#def outf(fun):\n# def inf(*args,**kwargs):\n# print(\"from the inf function\")\n# return fun(*args,**kwargs)\n# return inf\n#\n#@outf\n#def display():\n# print(\"hey there\")\n#\n#@outf\n#def disp_info(name, age):\n# print(f\"person is {name} and is {age} years old\")\n#\n#disp_info('jms',45)\n#display()\n\n# class decorator functions\nclass dec_class():\n def __init__(self,origfunc):\n self.origfunc = origfunc\n\n def __call__(self,*args,**kwargs):\n print('call method executed')\n return self.origfunc(*args,**kwargs)\n\n@dec_class\ndef display():\n print(\"hey there\")\n\n@dec_class\ndef disp_info(name, age):\n print(f\"person is {name} and is {age} years old\")\n\ndisp_info('jms',45)\ndisplay()\n\n\na = (x**2 for x in range(5))\ntry:\n while True: # the generator raises StopIteration when exhausted\n print(next(a))\nexcept StopIteration:\n print(\"list ends here\")\nprint(\"this is pointless\")\n","repo_name":"Noirdemort/Python_Chronicles","sub_path":"fpd.py","file_name":"fpd.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35631673395","text":"import datetime\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"cine\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.AddField(\n model_name=\"soiree\",\n name=\"time\",\n field=models.TimeField(default=datetime.time(20, 30)),\n 
preserve_default=True,\n ),\n migrations.AlterField(\n model_name=\"soiree\",\n name=\"date\",\n field=models.DateField(),\n preserve_default=True,\n ),\n ]\n","repo_name":"nim65s/django-cineclub","sub_path":"cine/migrations/0002_auto_20150109_2007.py","file_name":"0002_auto_20150109_2007.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"23872139425","text":"\"\"\"Test check Docker Config.\"\"\"\n\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\n\nfrom supervisor.addons.addon import Addon\nfrom supervisor.const import CoreState\nfrom supervisor.coresys import CoreSys\nfrom supervisor.docker.interface import DockerInterface\nfrom supervisor.docker.manager import DockerAPI\nfrom supervisor.resolution.checks.docker_config import CheckDockerConfig\nfrom supervisor.resolution.const import ContextType, IssueType, SuggestionType\nfrom supervisor.resolution.data import Issue, Suggestion\n\n\ndef _make_mock_container_get(bad_config_names: list[str], folder: str = \"media\"):\n \"\"\"Make mock of container get.\"\"\"\n mount = {\n \"Type\": \"bind\",\n \"Source\": f\"/mnt/data/supervisor/{folder}\",\n \"Destination\": f\"/{folder}\",\n \"Mode\": \"rw\",\n \"RW\": True,\n \"Propagation\": \"rprivate\",\n }\n\n def mock_container_get(name):\n out = MagicMock()\n out.status = \"running\"\n out.attrs = {\"State\": {}, \"Mounts\": []}\n if name in bad_config_names:\n out.attrs[\"Mounts\"].append(mount)\n\n return out\n\n return mock_container_get\n\n\nasync def test_base(coresys: CoreSys):\n \"\"\"Test check basics.\"\"\"\n docker_config = CheckDockerConfig(coresys)\n assert docker_config.slug == \"docker_config\"\n assert docker_config.enabled\n\n\n@pytest.mark.parametrize(\"folder\", [\"media\", \"share\"])\nasync def test_check(\n docker: DockerAPI, coresys: CoreSys, install_addon_ssh: Addon, folder: str\n):\n \"\"\"Test check reports issue when containers have incorrect config.\"\"\"\n docker.containers.get = _make_mock_container_get(\n [\"homeassistant\", \"hassio_audio\", \"addon_local_ssh\"], folder\n )\n with patch.object(DockerInterface, \"is_running\", return_value=True):\n await coresys.plugins.load()\n await coresys.homeassistant.load()\n await coresys.addons.load()\n\n docker_config = CheckDockerConfig(coresys)\n coresys.core.state = CoreState.RUNNING\n assert not coresys.resolution.issues\n assert not coresys.resolution.suggestions\n\n # An issue and suggestion is added per container with a config issue\n await docker_config.run_check()\n\n assert len(coresys.resolution.issues) == 4\n assert Issue(IssueType.DOCKER_CONFIG, ContextType.CORE) in coresys.resolution.issues\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.ADDON, reference=\"local_ssh\")\n in coresys.resolution.issues\n )\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.PLUGIN, reference=\"audio\")\n in coresys.resolution.issues\n )\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.SYSTEM) in coresys.resolution.issues\n )\n\n assert len(coresys.resolution.suggestions) == 4\n assert (\n Suggestion(SuggestionType.EXECUTE_REBUILD, ContextType.CORE)\n in coresys.resolution.suggestions\n )\n assert (\n Suggestion(\n SuggestionType.EXECUTE_REBUILD, ContextType.PLUGIN, reference=\"audio\"\n )\n in coresys.resolution.suggestions\n )\n assert (\n Suggestion(\n SuggestionType.EXECUTE_REBUILD, ContextType.ADDON, reference=\"local_ssh\"\n )\n in coresys.resolution.suggestions\n )\n assert (\n 
Suggestion(SuggestionType.EXECUTE_REBUILD, ContextType.SYSTEM)\n in coresys.resolution.suggestions\n )\n\n assert await docker_config.approve_check()\n\n # IF config issue is resolved, all issues are removed except the main one. Which will be removed if check isn't approved\n docker.containers.get = _make_mock_container_get([])\n with patch.object(DockerInterface, \"is_running\", return_value=True):\n await coresys.plugins.load()\n await coresys.homeassistant.load()\n await coresys.addons.load()\n\n assert not await docker_config.approve_check()\n assert len(coresys.resolution.issues) == 1\n assert len(coresys.resolution.suggestions) == 1\n assert (\n Issue(IssueType.DOCKER_CONFIG, ContextType.SYSTEM) in coresys.resolution.issues\n )\n\n\nasync def test_did_run(coresys: CoreSys):\n \"\"\"Test that the check ran as expected.\"\"\"\n docker_config = CheckDockerConfig(coresys)\n should_run = docker_config.states\n should_not_run = [state for state in CoreState if state not in should_run]\n assert len(should_run) != 0\n assert len(should_not_run) != 0\n\n with patch(\n \"supervisor.resolution.checks.docker_config.CheckDockerConfig.run_check\",\n return_value=None,\n ) as check:\n for state in should_run:\n coresys.core.state = state\n await docker_config()\n check.assert_called_once()\n check.reset_mock()\n\n for state in should_not_run:\n coresys.core.state = state\n await docker_config()\n check.assert_not_called()\n check.reset_mock()\n","repo_name":"home-assistant/supervisor","sub_path":"tests/resolution/check/test_check_docker_config.py","file_name":"test_check_docker_config.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":1510,"dataset":"github-code","pt":"3"} +{"seq_id":"27056757703","text":"\"\"\"Run the manual strategy visualisation output test.\n\nHow to run:\n\n.. 
code-block:: shell\n\n export WEBHOOK_URL=https://enzyme-polygon-eth-usdc.tradingstrategy.ai/\n python scripts/manual-visualisation-test.py\n\nIt will open the visualisation in a web browser pop up.\n\nSee also: https://tradingstrategy.ai/docs/programming/strategy-examples/examine-live-strategy.html\n\"\"\"\n\nimport datetime\nimport os\n\nimport requests\n\nfrom tradeexecutor.strategy.reverse_universe import reverse_trading_universe_from_state\nfrom tradeexecutor.visual.strategy_state import draw_single_pair_strategy_state\nfrom tradingstrategy.charting.candle_chart import VolumeBarMode\nfrom tradingstrategy.client import Client\nfrom tradeexecutor.monkeypatch.dataclasses_json import patch_dataclasses_json\nfrom tradeexecutor.state.state import State\nfrom tradingstrategy.timebucket import TimeBucket\n\n# Currently needed because unpatched dataclasses_json package issues\npatch_dataclasses_json()\n\nclient = Client.create_jupyter_client()\n\n# Public internet endpoint as exposed by the trade executor Docker\nwebbhook_url = os.environ[\"WEBHOOK_URL\"]\n\nstate_api = f\"{webbhook_url}/state\"\nresp = requests.get(state_api)\nstate_blob = resp.content\n\nprint(f\"Downloaded {len(state_blob):,} bytes state data\")\n\nstate: State = State.from_json(state_blob)\npair = state.portfolio.get_single_pair()\n\n# Add some data margin around our\n# trade timeline visualisation\nfirst_trade, last_trade = state.portfolio.get_first_and_last_executed_trade()\nfeed_start_at = first_trade.started_at - datetime.timedelta(days=2)\nfeed_end_at = last_trade.executed_at + datetime.timedelta(days=2)\n\nprint(\"Loading data the strategy has been executing on\")\nuniverse = reverse_trading_universe_from_state(\n state,\n client,\n TimeBucket.h1,\n)\n\nprint(\"Drawing an example visualisation\")\nsmall_figure = draw_single_pair_strategy_state(\n state,\n universe,\n height=512,\n)\n\n# Open a web browser pop up\n#small_figure.show()\n\nfor p in state.portfolio.get_all_positions():\n print(f\"Position #{p.position_id}: {p.get_opening_price()} {p.pair.get_ticker()}\")\n\nfor t in state.portfolio.get_all_trades():\n print(f\"Trade {t.trade_id}, assumed price: {t.planned_price}, executed price: {t.executed_price}\")\n","repo_name":"tradingstrategy-ai/trade-executor","sub_path":"scripts/manual-visualisation-test.py","file_name":"manual-visualisation-test.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"3"} +{"seq_id":"108512558","text":"import sys\nimport os\nimport glob\nimport threading\nimport queue\n\nimport zapi\nimport jiraapi\nimport logging\nimport json\nimport time\nimport subprocess\nimport argparse\nimport html\nimport itertools\nimport re\n\nusername = 'andy.anderson@mobiledgex.com'\n# jira_token = '***REMOVED***'\njira_token = '***REMOVED***'\n# userkey\n# access_key = '***REMOVED***'\n# secret_key = '***REMOVED***'\n\n# systemkey\n# access_key = '***REMOVED***';\n# secret_key = '***REMOVED***'\naccountid = '***REMOVED***'\naccess_key = '***REMOVED***'\nsecret_key = '***REMOVED***'\n\npython_path = '$WORKSPACE/go/src/github.com/mobiledgex/protos:$WORKSPACE/go/src/github.com/mobiledgex/modules:$WORKSPACE/go/src/github.com/mobiledgex/certs:$WORKSPACE/go/src/github.com/mobiledgex/testcases:$WORKSPACE/go/src/github.com/mobiledgex/testcases/config'\n# python_path = 
'/Users/andyanderson/go/src/github.com/mobiledgex/edge-cloud-qa/protos:/Users/andyanderson/go/src/github.com/mobiledgex/edge-cloud-qa/modules:/Users/andyanderson/go/src/github.com/mobiledgex/edge-cloud-qa/certs:/Users/andyanderson/go/src/github.com/mobiledgex/edge-cloud-qa/testcases'\n\nfound_failure = -1\nnumber_failed = 0\nnumber_passed = 0\ndelay_between_tests = 10\ntestcase_timeout = '60m'\nfailed_list = []\n\ncrm_pool_round_robin = None\ncrm_pool_var = None\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(filename)s %(funcName)s() line %(lineno)d - %(levelname)s - - %(message)s\")\nlogging.getLogger('urllib3').setLevel(logging.ERROR)\nlogging.getLogger('zapi').setLevel(logging.DEBUG)\n\n\nclass TCWorker(threading.Thread):\n def __init__(self, tc_queue, z, *args, **kwargs):\n self.tc_queue = tc_queue\n self.z = z\n super().__init__(*args, **kwargs)\n\n def run(self):\n while True:\n try:\n logger.info('TCWorker getting tc')\n tc = self.tc_queue.get(timeout=1)\n except queue.Empty:\n logger.info('TCWorker tc_queue is empty. Exiting thread')\n return\n\n logger.info(f'TCWorker executing {tc}, queuesize={self.tc_queue.qsize()}')\n try:\n exec_testcase(self.z, tc)\n except Exception as e:\n logger.error(f'exec_testcase exception {e}')\n\n logger.info(f'TCWorker execution done {tc}')\n self.tc_queue.task_done()\n\n\ndef main():\n starttime = time.time()\n\n parser = argparse.ArgumentParser(description='execute testcases in a Zephyr test cycle')\n parser.add_argument('--version_from_load', action='store_true')\n parser.add_argument('--failed_only', action='store_true')\n parser.add_argument('--test_timeout', default='60m')\n\n args = parser.parse_args()\n\n num_executors = 1\n\n global crm_pool_round_robin\n global crm_pool_var\n global testcase_timeout\n\n testcase_timeout = args.test_timeout\n\n print(os.environ)\n cycle = os.environ['Cycle']\n if 'Version' in os.environ:\n version = os.environ['Version']\n else:\n version = cycle.split('_')[1]\n project = os.environ['Project']\n folder = None\n component = os.environ['Components']\n crm_pool_name = 'CRMPool'\n if 'Platform' in os.environ:\n component = component + ' ,' + os.environ['Platform']\n folder = os.environ['Platform'].lower()\n crm_pool_name = crm_pool_name + os.environ['Platform']\n if 'TestTarget' in os.environ: # will replace Platform\n component = component + ' ,' + os.environ['TestTarget']\n folder = os.environ['TestTarget'].lower()\n # crm_pool_name = crm_pool_name + os.environ['TestTarget']\n if 'Folder' in os.environ:\n folder = os.environ['Folder']\n\n workspace = os.environ['WORKSPACE']\n # httpTrace = os.environ['httpTrace']\n if 'NumberParallelExecutions' in os.environ:\n num_executors = int(os.environ['NumberParallelExecutions'])\n\n # export CRMPool=\"{\\\"cloudlet_name_openstack_shared\\\":[{\\\"cloudlet\\\":\\\"automationHawkinsCloudlet\\\",\\\"operator\\\":\\\"GDDT\\\",\\\"region\\\":\\\"EU\\\"},{\\\"cloudlet\\\":\\\"packet-qaregression\\\",\\\"operator\\\":\\\"packet\\\",\\\"region\\\":\\\"US\\\"},{\\\"cloudlet\\\":\\\"automationParadiseCloudlet\\\",\\\"operator\\\":\\\"GDDT\\\",\\\"region\\\":\\\"EU\\\"}]}\"\n crm_pool_dict = None\n if crm_pool_name in os.environ:\n try:\n crm_pool_dict = json.loads(os.environ[crm_pool_name])\n logger.info(f'crm_pool_dict={crm_pool_dict}')\n except Exception as e:\n logger.error(f'error loading CRMPool:{e}')\n sys.exit(1)\n crm_pool_var = list(crm_pool_dict)[0]\n crm_pool_round_robin = 
itertools.cycle(crm_pool_dict[list(crm_pool_dict)[0]])\n\n # if httpTrace == 'true':\n # httpTrace = 1\n # else:\n # httpTrace = 0\n\n # zephyrBaseUrl = \"https://mobiledgex.atlassian.net/rest/zapi/latest/\"\n\n logger.info(f'cycle={cycle} version={version} project={project} folder={folder} component={component} numexecutors={num_executors} workspace={workspace} crmpool={crm_pool_dict} crm_pool_round_robin={crm_pool_round_robin}')\n\n z = zapi.Zapi(username=accountid, access_key=access_key, secret_key=secret_key, debug=False)\n j = jiraapi.Jiraapi(username=username, token=jira_token)\n\n project_info = j.get_project(project)\n content = json.loads(project_info)\n project_id = content['id']\n version_id = None\n for v in content['versions']:\n if v['name'] == version:\n version_id = v['id']\n cycle_id = z.get_cycle_id(name=cycle, project_id=project_id, version_id=version_id)\n if not cycle_id:\n logger.error(f'cycle id not found for cycle={cycle}')\n sys.exit(1)\n\n folder_id = None\n if folder:\n folder_id = z.get_folder_id(name=folder, project_id=project_id, version_id=version_id, cycle_id=cycle_id)\n if not folder_id:\n logger.error(f'folder id not found for folder={folder}')\n sys.exit(1)\n\n # zephyrQueryUrl = zephyrBaseUrl + \"zql/executeSearch?zqlQuery=\" + urllib.parse.quote_plus(\"project=$project AND fixVersion=\\\"$version\\\" AND cycleName in (\\\"$cycle\\\") AND summary ~ \\\"$summary\\\" ORDER BY Issue ASC\") + \"&maxRecords=2000\"\n # zephyrQueryUrl = \"project=\" + project + \" AND fixVersion=\\\"\" + version + \"\\\" AND cycleName in (\\\"\" + cycle + \"\\\") AND summary ~ \\\"\" + summary + \"\\\" ORDER BY Issue ASC\"\n # zephyrQueryUrl = \"project=\" + project\n # zephyrQueryUrl = \"fixVersion=Nimbus\"\n # jiraTestcaseQuery = \"project=\" + project + \" AND fixVersion=\\\"\" + version + \"\\\" AND cycleName in (\\\"\" + cycle + \"\\\") AND summary ~ \\\"\" + summary + \"\\\" ORDER BY Issue ASC\"\n component_list = component.split(',')\n component_query = ''\n z_component_query = ''\n for component in component_list:\n component_query += f' AND component = \\\"{component.strip()}\\\"'\n z_component_query += f' AND component = \\\\\\\"{component.strip()}\\\\\\\"'\n\n zephyrQueryUrl = 'project=\\\\\\\"' + project + '\\\\\\\" AND fixVersion=\\\\\\\"' + version + '\\\\\\\"' + component_query + ' ORDER BY Issue ASC'\n jiraQueryUrlPre = 'project=\"' + project + '\" AND fixVersion=\"' + version + '\"' + component_query\n\n if args.failed_only:\n logger.info('Only executing failed testcases')\n zephyrQueryUrl = f'project=\\\\\\\"edge-cloud QA\\\\\\\" AND fixVersion=\\\\\\\"{version}\\\\\\\"{z_component_query} AND cycleName=\\\\\\\"{cycle}\\\\\\\" AND executionStatus=Fail ORDER BY Issue ASC'\n failed_tcids = get_zephyr_failed_testcases(z, zephyrQueryUrl, zephyrQueryUrl)\n if len(failed_tcids) <= 0:\n failed_tcids = ['EC-1']\n jiraQueryUrlPre += ' AND key in ('\n for key in failed_tcids:\n jiraQueryUrlPre += f'{key},'\n jiraQueryUrlPre = re.sub(r',$', '', jiraQueryUrlPre)\n jiraQueryUrlPre += ')'\n\n jiraQueryUrl = jiraQueryUrlPre + ' ORDER BY Issue ASC'\n\n logger.info(\"zephyrQueryUrl=\" + zephyrQueryUrl)\n\n # result = z.execute_query(zephyrQueryUrl)\n startat = 0\n maxresults = 0\n total = 1\n tc_queue = queue.Queue()\n while (startat + maxresults) < total:\n # jiraQueryUrl = jiraQueryUrlPre + ' startAt=' + str(startat + maxresults) + ' ORDER By Issue ASC'\n result = j.search(query=jiraQueryUrl, start_at=startat + maxresults)\n query_content = json.loads(result)\n 
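# Jira's search API pages its results: each response reports startAt/maxResults/total,\n # so the loop keeps advancing the offset until every matching issue has been queued.\n 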
startat = query_content['startAt']\n maxresults = query_content['maxResults']\n total = query_content['total']\n print(startat, maxresults, total)\n new_queue = get_testcases(z, result, cycle_id, project_id, version_id, folder_id, folder)\n while not new_queue.empty():\n tc_queue.put(new_queue.get())\n\n logger.info(f'len tc_queue={tc_queue.qsize()}')\n # exec_status = exec_testcases(z, tc_list, rhc, httpTrace, summary)\n exec_status = exec_testcases_parallel(z, tc_queue, num_executors, args.failed_only)\n logger.info(\"exec_status=\" + str(exec_status))\n\n endtime = time.time()\n logger.info(f'test duration is {(endtime-starttime)/60} minutes')\n\n sys.exit(exec_status)\n\n\ndef get_zephyr_failed_testcases(z, url, query):\n print(f'executing zephyr query={query}')\n total_count = 9999999\n num_returned = 0\n total_returned = 0\n tc_list = []\n while total_returned < total_count:\n result = z.execute_query(url, offset=total_returned)\n query_content = json.loads(result)\n total_count = query_content['totalCount']\n num_returned = len(query_content['searchObjectList'])\n total_returned += num_returned\n\n for exec in query_content['searchObjectList']:\n print(f\"tcid={exec['issueKey']} defects={exec['execution']['defects']}\")\n if len(exec['execution']['defects']) == 0:\n tc_list.append(exec['issueKey'])\n else:\n tc_list.append(exec['issueKey'])\n for defect in exec['execution']['defects']:\n if defect['status']['name'] != 'Closed' and defect['status']['name'] != 'Ready To Verify':\n tc_list.pop()\n break\n\n print(f'found {len(tc_list)} failed testcases')\n\n return tc_list\n\n\ndef get_testcases(z, result, cycle_id, project_id, version_id, folder_id, folder_name):\n query_content = json.loads(result)\n tc_queue = queue.Queue()\n\n for s in query_content['issues']:\n print('issueKey', s['key'])\n logger.info(\"getting script for:\" + s['key'])\n sresult = z.get_teststeps(s['id'], s['fields']['project']['id'])\n sresult_content = json.loads(sresult)\n\n if sresult_content: # list is not empty; therefore, it has a teststep\n logger.info(\"found a teststep\")\n # tmp_list = {'id': s['id'], 'tc': sresult_content[0]['step'], 'issue_key': s['issueKey'], 'issue_id': s['issueId']}\n # tmp_list = {'id': s['execution']['id'], 'tc': sresult_content[0]['step'], 'issue_key': s['issueKey'], 'issue_id': s['execution']['issueId'], 'defects': s['execution']['defects'], 'project_id': s['execution']['projectId'], 'version_id':s['execution']['versionId'], 'cycle_id':s['execution']['cycleId']}\n try:\n tmp_list = {'tc': sresult_content[0]['step'], 'issue_key': s['key'], 'issue_id': s['id'], 'project_id': project_id, 'version_id': version_id, 'cycle_id': cycle_id, 'folder_id': folder_id, 'folder_name': folder_name, 'defects': s['fields']['issuelinks']}\n print(s)\n tmp_list['defect_count'] = len(s['fields']['issuelinks']) # need to check for issueslink section\n # if 'totalDefectCount' in s['execution']: # totalDefectCount only exists if the test has previously been executed\n # tmp_list['defect_count'] = s['execution']['totalDefectCount']\n # else:\n # tmp_list['defect_count'] = 0\n logger.info(\"script is \" + sresult_content[0]['step'])\n except Exception as e:\n logger.error('error getting teststep from %s error: %s', s['key'], e)\n else:\n logger.info(\"did NOT find a teststep\")\n tmp_list = {'id': s['id'], 'tc': 'noTestcaseInStep', 'issue_key': s['key']}\n\n tc_queue.put(tmp_list)\n print(tc_queue)\n\n return tc_queue\n\n\ndef get_testcases_z(z, result, cycle):\n query_content = json.loads(result)\n 
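# Note: this ZQL-based function appears to be a legacy counterpart of get_testcases() above;\n # ZAPI returns executions under 'searchObjectList', filtered here by comparing cycleName.\n 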
tc_list = []\n\n for s in query_content['searchObjectList']:\n print('cycleName', s['execution']['cycleName'], cycle)\n if s['execution']['cycleName'] == cycle:\n logger.info(\"getting script for:\" + s['issueSummary'])\n # sresult = z.get_teststeps(s['issueId'])\n sresult = z.get_teststeps(s['execution']['issueId'], s['execution']['projectId'])\n sresult_content = json.loads(sresult)\n # logging.info(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n # logging.info(\"sresult=\" + sresult_content)\n # logging.info(\"YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\")\n # logging.info(\"stepLength=%d\" % int(len(sresult_content)))\n if sresult_content: # list is not empty;therefore, has a teststep\n logger.info(\"found a teststep\")\n # tmp_list = {'id': s['id'], 'tc': sresult_content[0]['step'], 'issue_key': s['issueKey'], 'issue_id': s['issueId']}\n tmp_list = {'id': s['execution']['id'], 'tc': sresult_content[0]['step'], 'issue_key': s['issueKey'], 'issue_id': s['execution']['issueId'], 'defects': s['execution']['defects'], 'project_id': s['execution']['projectId'], 'version_id': s['execution']['versionId'], 'cycle_id': s['execution']['cycleId']}\n if 'totalDefectCount' in s['execution']: # totalDefectCount only exists if the test has previously been executed\n tmp_list['defect_count'] = s['execution']['totalDefectCount']\n else:\n tmp_list['defect_count'] = 0\n logger.info(\"script is \" + sresult_content[0]['step'])\n else:\n logger.info(\"did NOT find a teststep\")\n tmp_list = {'id': s['id'], 'tc': 'noTestcaseInStep', 'issue_key': s['issueKey']}\n\n tc_list.append(tmp_list)\n\n return tc_list\n\n\n# def update_defects(z, l):\n# logger.info('updating defects')\n# for t in l:\n# logger.info(\"checking defects for \" + t['issue_key'])\n# # elist = z.get_execution_list(execution_id = t['issue_id'])\n# # elist = z.get_execution_list(execution_id = t['id'])\n# # elist_string = json.loads(elist)\n# # execList = elist_string['executions']\n# # if len(execList) > 1 and execList[1]['totalDefectCount'] > 0:\n# print('t', t)\n# if 'defect_count' not in t:\n# t['defect_count'] = 0\n# if t['defect_count'] > 0:\n# logger.info('defects found = ' + str(t['defect_count']))\n# # previous_exec_defects = execList[1]['defects']\n# previous_exec_defects = t['defects']\n# print(previous_exec_defects)\n# d_list = []\n# for ped in previous_exec_defects:\n# print('status', ped['inwardIssue']['fields']['status']['name'])\n# if ped['inwardIssue']['fields']['status']['name'] != 'Closed':\n# print(ped['inwardIssue']['key'])\n# d_list.append(ped['inwardIssue']['key'])\n#\n# print(d_list)\n#\n# logger.info('updating defect list for ' + t['issue_key'] + ' to ' + str(d_list))\n# elist = z.get_execution_list(execution_id=t['issue_id'])\n# elist_string = json.loads(elist)\n# print(elist_string)\n# execList = elist_string['executions']\n#\n# z.update_execution_details(execution_id=execList[0]['id'], defect_list=d_list)\n# #time.sleep(5)\n# sys.exit(1)\n# else:\n# logger.info('no defects found')\n\n\ndef find(name, path):\n logger.debug('finding file {} in {}'.format(name, path))\n for root, dirs, files in os.walk(path):\n if name in files:\n logger.debug('found {} {}'.format(root, name))\n return os.path.join(root, name)\n elif name in dirs:\n logger.debug('found directory {} {}'.format(root, name))\n return os.path.join(root, name)\n logger.error('could not find {}'.format(name))\n return 'fileNotFound'\n\n\ndef update_single_defect(z, t):\n print(t)\n if 'defect_count' not in t:\n t['defect_count'] = 0\n if 
t['defect_count'] > 0:\n logger.info('defects found = ' + str(t['defect_count']))\n # previous_exec_defects = execList[1]['defects']\n previous_exec_defects = t['defects']\n print(previous_exec_defects)\n d_list = []\n for ped in previous_exec_defects:\n if 'inwardIssue' in ped:\n print('status', ped['inwardIssue']['fields']['status']['name'])\n if ped['inwardIssue']['fields']['status']['name'] != 'Closed':\n print(ped['inwardIssue']['key'])\n d_list.append(ped['inwardIssue']['id'])\n\n print(d_list)\n else:\n logger.info('not updating issue since no inwardIssue')\n\n if d_list:\n logger.info('updating defect list for ' + t['issue_key'] + ' to ' + str(d_list))\n # elist = z.get_execution_list(execution_id = t['issue_id'])\n # elist_string = json.loads(elist)\n # execList = elist_string['executions']\n\n z.update_execution_details(execution_id=t['execution_id'], project_id=t['project_id'], issue_id=t['issue_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], defect_list=d_list)\n # time.sleep(5)\n # sys.exit(1)\n else:\n logger.info('no defects to update, defect list is empty')\n else:\n logger.info('no defects found')\n\n\ndef exec_testcases_parallel(z, tc_queue, num_executors, failed_only):\n global found_failure\n global number_passed\n global number_failed\n global failed_list\n global crm_pool_round_robin\n\n num_testcases = tc_queue.qsize()\n\n for _ in range(num_executors):\n TCWorker(tc_queue, z).start()\n tc_queue.join() # wait till queue is empty\n\n if failed_only and num_testcases == 0:\n print('running failed only and found no failed testcases to execute. setting found_failure to 0')\n found_failure = 0\n print('found_failure', found_failure)\n print('number_testcases', num_testcases, 'number_passed', number_passed, 'number_failed', number_failed)\n print('failed testcases:', failed_list)\n\n return found_failure\n\n\ndef exec_testcases_parallel_orig(z, tc_list, num_executors, failed_only):\n global found_failure\n global number_passed\n global number_failed\n global crm_pool_round_robin\n\n threads = []\n\n logger.info('number of testcases is ' + str(len(tc_list)))\n for t in range(0, len(tc_list), num_executors):\n print('t', t)\n plist = tc_list[t:t + num_executors]\n logger.info('adding this many testcases:' + str(len(plist)))\n for p in plist:\n logger.info('adding thread for tc=' + p['tc'])\n thread = threading.Thread(target=exec_testcase, args=(z, p))\n threads += [thread]\n thread.start()\n time.sleep(delay_between_tests) # wait between starting each testcase\n\n for x in threads:\n x.join()\n\n if failed_only and len(tc_list) == 0:\n print('running failed only and found no failed testcases to execute. 
setting found_failure to 0')\n found_failure = 0\n print('found_failure', found_failure)\n print('number_testcases', len(tc_list), 'number_passed', number_passed, 'number_failed', number_failed)\n\n return found_failure\n\n\ndef exec_testcase(z, t):\n # found_failure = -1\n global found_failure\n global number_passed\n global number_failed\n global crm_pool_round_robin\n global crm_pool_var\n global failed_list\n\n region = 'notset'\n cloudlet = 'notset'\n operator = 'notset'\n\n print('tc', t['tc'])\n last_status = 'unset'\n\n linux_os = windows_os = False\n\n if os.name == 'posix':\n linux_os = True\n elif os.name == 'nt':\n windows_os = True\n\n if linux_os:\n tmpdir = '/tmp/'\n python_path = '$WORKSPACE/go/src/github.com/mobiledgex/protos:$WORKSPACE/go/src/github.com/mobiledgex/modules:$WORKSPACE/go/src/github.com/mobiledgex/certs:$WORKSPACE/go/src/github.com/mobiledgex/testcases::$WORKSPACE/go/src/github.com/mobiledgex/testcases/config'\n elif windows_os:\n tmpdir = os.environ['TMP'] + '\\\\'\n python_path = '%WORKSPACE%/go/src/github.com/mobiledgex/protos;%WORKSPACE%/go/src/github.com/mobiledgex/modules;%WORKSPACE%/go/src/github.com/mobiledgex/certs;%WORKSPACE%/go/src/github.com/mobiledgex/testcases;$WORKSPACE/go/src/github.com/mobiledgex/testcases/config'\n\n if t['tc'] == 'noTestcaseInStep':\n logger.info('skipping execution of {}. does not contain a testcase'.format(t['issue_key']))\n found_failure = 1 # consider it a failure if the teststep is missing\n number_failed += 1\n failed_list.append(t['issue_key'])\n # continue # go to the next testcase. probably should have put the rest of the code in else statement but this was added later\n return\n\n logger.info(\"executing \" + t['issue_key'])\n print('xxxxxx', t['project_id'])\n status = z.create_execution(issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], folder_id=t['folder_id'], status=3)\n query_content = json.loads(status)\n status_s = json.dumps(status)\n\n t['execution_id'] = query_content['execution']['id']\n print('execid', t['execution_id'])\n\n if t['defect_count'] > 0:\n update_single_defect(z, t)\n\n status = z.update_status(execution_id=t['execution_id'], issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], status=3)\n if 'll execution(s) were successfully updated' in status_s:\n logger.info(\"tc status WIP updated successful\")\n else:\n logger.info(\"tc status WIP update FAIL\")\n\n tc_type = ''\n tc = 'tcnotset'\n robot_tcname = None\n print(t)\n print('xxxxxxxx', t['tc'], 'bbbbbb', os.path.basename(t['tc']))\n if '.robot' in t['tc'] or t['tc'].endswith('_robot'):\n tc_type = 'robot'\n tclines = t['tc'].splitlines()\n tc = tclines[0]\n if len(tclines) > 1:\n robot_tcname = tclines[1]\n elif '.tc.' 
in os.path.basename(t['tc']) or '.tc_' in os.path.basename(t['tc']):\n tc_type = 'python'\n tc = os.path.basename(t['tc'])\n elif '.sln' in t['tc']:\n tc_type = 'csharp'\n tc = t['tc']\n elif '.cpp' in t['tc']:\n tc_type = 'cpp'\n tc = t['tc']\n else:\n tc = os.path.basename(t['tc'])\n\n # tmpdir = os.environ['TMPDIR']\n # tmpdir = '/tmp/'\n tc_replace = tc.replace('/', '') # remove slash from filename\n # file_delete = tmpdir + os.environ['Cycle'] + \"_\" + tc_replace + \"_\" + t['issue_key'] + \"*\"\n file_delete = f'{tmpdir}*{t[\"folder_name\"]}_{t[\"issue_key\"]}*'\n file_output = f'{tmpdir}{os.environ[\"Cycle\"]}_{t[\"folder_name\"]}_{tc_replace}_{t[\"issue_key\"]}_{str(int(time.time()))}.out'\n file_extension = '.txt'\n\n # delete old files since /tmp eventually gets filled up\n # delete_cmd = \"rm -f \" + file_delete\n logger.info(\"deleting \" + file_delete)\n # subprocess.run(delete_cmd, shell=True, check=True)\n for f in glob.glob(file_delete):\n try:\n os.remove(f)\n except Exception as e:\n logging.info(f'remove failed:{e}')\n\n my_env = os.environ.copy()\n if tc_type == 'robot':\n robot_file = find(tc, os.environ['WORKSPACE'])\n xml_output = file_output + '.xml'\n report_output = file_output + '.html'\n var_cmd = ''\n variable_file = ''\n var_override_cmd = ''\n region = 'noCRMPoolDefined'\n cloudlet = 'noCRMPoolDefined'\n operator = 'noCRMPoolDefined'\n\n if 'VariableFile' in os.environ:\n variable_file = os.environ['VariableFile']\n if len(variable_file) > 0:\n variable_file_full = find(variable_file, os.environ['WORKSPACE'])\n var_cmd = f'--variablefile {variable_file_full}'\n if crm_pool_round_robin:\n print('round')\n next_crm = next(crm_pool_round_robin)\n logger.info(f'executing on pool={next_crm}')\n region = next_crm['region']\n cloudlet = next_crm['cloudlet']\n cloudlet_openrc = cloudlet\n operator = next_crm['operator']\n physical_name = next_crm['physical_name']\n var_override_cmd = f'--variable {crm_pool_var}:{cloudlet} --variable physical_name_crm:{physical_name} --variable operator_name_openstack:{operator} --variable operator_name_crm:{operator} --variable region:{region}'\n\n env_file = find(f'automation_env_{region}.sh', os.environ['WORKSPACE'])\n\n # only openstack needs the openrc file so set to Buckhorn if not openstack. This is so non Openstack tests will still execute. 
Not sure I need this???\n # if os.environ['Platform'] != 'Openstack' or os.environ['TestTarget'] != 'Openstack':\n # cloudlet_openrc = 'automationBuckhornCloudlet'\n\n openstack_file = find(f'openrc_{cloudlet_openrc}.mex', os.environ['WORKSPACE'])\n logger.info(f'using env_file={env_file} openstack_file={openstack_file}')\n\n my_env['AUTOMATION_OPENSTACK_DEDICATED_ENV'] = openstack_file\n my_env['AUTOMATION_OPENSTACK_SHARED_ENV'] = openstack_file\n my_env['AUTOMATION_OPENSTACK_VM_ENV'] = openstack_file\n my_env['AUTOMATION_OPENSTACK_GPU_ENV'] = os.environ['WORKSPACE'] + '/go/src/github.com/mobiledgex/testcases/config/openrc_automationBuckhornCloudlet.mex'\n my_env['AUTOMATION_OPENSTACK_OFFLINE_ENV'] = openstack_file\n my_env['AUTOMATION_OPENSTACK_VGPU_ENV'] = os.environ['WORKSPACE'] + '/go/src/github.com/mobiledgex/testcases/config/openrc_automationBuckhornCloudlet.mex'\n my_env['AUTOMATION_OPENSTACK_VMPOOL_ENV'] = os.environ['WORKSPACE'] + '/go/src/github.com/mobiledgex/testcases/config/openrc_automationBuckhornCloudlet.mex'\n\n with open(env_file) as f:\n lines = f.readlines()\n for line in lines:\n if '=' in line:\n var, value = line.split('=')\n value = value.strip()\n logger.info(f'adding env {var}={value}')\n my_env[var] = value\n logger.debug(f'my_env={my_env}')\n\n if robot_tcname:\n if linux_os:\n exec_cmd = f'export PYTHONPATH={python_path};robot --loglevel TRACE {var_cmd} {var_override_cmd} --outputdir /tmp --report {report_output} --output {xml_output} --log {file_output} -t \\\"{robot_tcname}\\\" {robot_file}'\n elif windows_os:\n exec_cmd = f'set PYTHONPATH={python_path} & robot --loglevel INFO {var_cmd} --outputdir {tmpdir} --output {xml_output} --log {file_output} -t \\\"{robot_tcname}\\\" {robot_file}'\n else:\n exec_cmd = f'export PYTHONPATH={python_path};robot --loglevel TRACE {var_cmd} {var_override_cmd} --outputdir /tmp --report {report_output} --output {xml_output} --log {file_output} {robot_file}'\n # file_output = '/tmp/log.html'\n file_extension = '.html'\n elif tc_type == 'python':\n exec_cmd = 'export PYTHONPATH=' + python_path + ';python3 -m unittest ' + tc + ' > ' + file_output + ' 2>&1'\n elif tc_type == 'csharp':\n dirname, solutionname = tc.split('/')\n tc_file = find(solutionname, os.environ['WORKSPACE'])\n dll = os.path.dirname(tc_file) + f'/{dirname}/bin/Debug/netcoreapp3.1/{dirname}.dll'\n csproj = os.path.dirname(tc_file) + f'/{dirname}/{dirname}.csproj'\n exec_cmd = f'dotnet clean {csproj} && dotnet build {tc_file} -c Debug /p:Version=1.0 && dotnet {dll} > {file_output} 2>&1'\n elif tc_type == 'cpp':\n dirname, cppname = tc.split('/')\n tc_file = find(cppname, os.environ['WORKSPACE'])\n exec_cmd = f'cd {os.path.dirname(tc_file)};make clean && make && ./{dirname} > {file_output} 2>&1'\n # else:\n # exec_cmd = \"export AUTOMATION_HTTPTRACE=\" + str(httpTrace) + \";export AUTOMATION_RHCIP=\" + rhc + \";./\" + tc + \" \" + t['issue_key'] + \" > \" + file_output + \" 2>&1\"\n # exec_cmd = \"export AUTOMATION_IP=\" + rhc + \";\" + \"pwd\" + \" > /tmp/\" + file_output + \" 2>&1\"\n logger.info(\"executing \" + exec_cmd)\n try:\n exec_file = f'{file_output}.exec'\n logger.info(f'writing exec file {exec_file}')\n with open(exec_file, 'w') as f:\n f.write(exec_cmd)\n except Exception as e:\n logger.info(f'exec file write error {e}')\n\n exec_start = time.time()\n try:\n if linux_os:\n exec_cmd = f'timeout {testcase_timeout} bash \"{exec_file}\" && rm {file_output}.exec'\n logger.info(\"subprocess \" + exec_cmd)\n r = subprocess.run(exec_cmd, shell=True, 
check=True, env=my_env)\n logger.info(f'subprocess returncode={r.returncode}')\n exec_stop = time.time()\n exec_duration = exec_stop - exec_start\n comment = html.escape('{\"region\":\"' + region + '\", \"cloudlet\":\"' + cloudlet + '\", \"operator\":\"' + operator + '\", \"start_time\":' + str(exec_start) + ', \"end_time\":' + str(exec_stop) + ', \"duration\":' + str(exec_duration) + '}')\n status = z.update_status(execution_id=t['execution_id'], issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], status=1, comment=comment)\n # status = z.create_execution(issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], status=1)\n logger.info(f'test passed:{t[\"issue_key\"]} number_passed={number_passed} number_failed={number_failed}')\n last_status = 'pass'\n if found_failure == -1:\n found_failure = 0\n number_passed += 1\n except subprocess.CalledProcessError as err:\n exec_stop = time.time()\n exec_duration = exec_stop - exec_start\n # comment = html.escape('{\"start_time\":' + str(exec_start) + ', \"end_time\":' + str(exec_stop) + ', \"duration\":' + str(exec_duration) + '}')\n comment = html.escape('{\"region\":\"' + region + '\", \"cloudlet\":\"' + cloudlet + '\", \"operator\":\"' + operator + '\", \"start_time\":' + str(exec_start) + ', \"end_time\":' + str(exec_stop) + ', \"duration\":' + str(exec_duration) + '}')\n logger.info('test failed:' + t['issue_key'])\n found_failure = 1\n number_failed += 1\n failed_list.append(t['issue_key'])\n logger.info(\"exec cmd failed. return code=: \" + str(err.returncode))\n logger.info(\"exec cmd failed. stdout=: \" + str(err.stdout))\n logger.info(\"exec cmd failed. stderr=: \" + str(err.stderr))\n status = z.update_status(execution_id=t['execution_id'], issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], status=2, comment=comment)\n # status = z.create_execution(issue_id=t['issue_id'], project_id=t['project_id'], cycle_id=t['cycle_id'], version_id=t['version_id'], status=2)\n last_status = 'fail'\n except Exception as err:\n logger.error(f'unknown error found while executing test: {err}')\n\n try:\n file_output_done = file_output + '_' + str(int(time.time())) + file_extension\n # add ending timestamp to file\n mv_cmd = 'mv {} {}'.format(file_output, file_output_done)\n logger.info(\"moving \" + mv_cmd)\n r = subprocess.run(mv_cmd, shell=True, check=True)\n except subprocess.CalledProcessError as err:\n logger.info(\"mv cmd failed. return code=: \" + str(err.returncode))\n logger.info(\"mv cmd failed. stdout=: \" + str(err.stdout))\n logger.info(\"mv cmd failed. stderr=: \" + str(err.stderr))\n\n # zip output\n # try:\n # zip_cmd = 'gzip {}'.format(file_output_done)\n # logging.info(\"zipping \" + zip_cmd)\n # r = subprocess.run(zip_cmd, shell=True, check=True)\n # except subprocess.CalledProcessError as err:\n # logging.info(\"gz cmd failed. return code=: \" + str(err.returncode))\n # logging.info(\"gz cmd failed. stdout=: \" + str(err.stdout))\n # logging.info(\"gz cmd failed. stderr=: \" + str(err.stderr))\n\n # add output file to jira\n # z.add_attachment(id=t['id'], file=file_output_done)\n if os.path.isfile(file_output_done):\n z.add_attachment(id=t['execution_id'], issue_id=t['issue_id'], project_id=t['project_id'], version_id=t['version_id'], cycle_id=t['cycle_id'], file=file_output_done)\n else:\n logger.error('ERROR adding attachment. 
file {} does not exist'.format(file_output_done))\n\n # rename trace file to pass or fail for easier debugging\n try:\n mv_cmd = 'mv {} {}.{}'.format(file_output_done, file_output_done, last_status)\n logger.info(\"moving \" + mv_cmd)\n r = subprocess.run(mv_cmd, shell=True, check=True)\n except subprocess.CalledProcessError as err:\n logger.info(\"mv cmd failed. return code=: \" + str(err.returncode))\n logger.info(\"mv cmd failed. stdout=: \" + str(err.stdout))\n logger.info(\"mv cmd failed. stderr=: \" + str(err.stderr))\n\n # add output file to jira\n # z.add_attachment(id=t['id'], file=file_output_done)\n\n # if os.path.isfile(t['tc']) and os.access(t['tc'], os.X_OK):\n # else:\n # print(\"test case does not exist or not executable, failing tcid=\" + t['issue_key'] + \" \" + t['tc'])\n # status = z.update_status(t['id'], 2)\n\n # sys.exit(1)\n # return found_failure\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mobiledgex/edge-cloud-qa","sub_path":"jenkins/execJira_parallel.py","file_name":"execJira_parallel.py","file_ext":"py","file_size_in_byte":33799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13293445336","text":"import functools\nimport logging\nimport threading\nimport time\n\n\nTYPING_INTERVAL = 5\nCONF_START = 2000000000\nVK_METHOD_GROUPS = {'account', 'ads', 'apps', 'audio', 'auth', 'board', 'database', 'docs', 'fave', 'friends', 'gifts', 'groups', 'leads', 'likes',\n 'market', 'messages', 'newsfeed', 'notes', 'notifications', 'pages', 'photos', 'places', 'polls', 'search', 'stats', 'status', 'storage',\n 'users', 'utils', 'video', 'wall', 'widgets'}\n\nlogger = logging.getLogger('vkapi')\n\n\nclass DelayedCall:\n def __init__(self, dispatcher, method, params):\n self.dispatcher = dispatcher\n self.method = method\n self.params = params\n self._lock = threading.Lock()\n self._callback_func = None\n self._value = None\n self._computed = False\n\n def _set_value(self, value):\n with self._lock:\n self._value = value\n self._computed = True\n if self._callback_func is not None:\n self._do_callback()\n\n def _do_callback(self):\n self._callback_func(self.params, self._value)\n\n def set_callback(self, func):\n with self._lock:\n self._callback_func = func\n if self._computed:\n self._do_callback()\n return self\n\n def walk(self, func):\n def cb(req, resp):\n func(req, resp)\n if resp is None:\n return\n if 'next_from' in resp:\n if resp['next_from']:\n req['start_from'] = resp['next_from']\n self.dispatcher._callMethod(self.method, req).set_callback(cb)\n elif 'count' in resp and 'count' in req and req['count'] + req.get('offset', 0) < resp['count']:\n req['offset'] = req.get('offset', 0) + req['count']\n self.dispatcher._callMethod(self.method, req).set_callback(cb)\n self.set_callback(cb)\n return self\n\n\nclass VkMethodDispatcher:\n\n class _GroupWrapper:\n def __init__(self, group, dispatcher):\n self.group = group\n self.dispatcher = dispatcher\n\n def __getattr__(self, subitem):\n def call(**kwargs):\n return self.dispatcher._callMethod(self.group + '.' 
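The Jenkins runner record above enforces per-test timeouts by prefixing the command with the Unix timeout binary, so a timed-out test surfaces only as a non-zero exit code. For reference, subprocess can express the same contract directly through its own timeout parameter; a minimal sketch, with hypothetical names rather than the repo's API:

import subprocess

def run_test(cmd: str, timeout_s: int, env=None) -> str:
    # Classify a shell command as 'pass', 'fail' or 'timeout' (sketch only).
    try:
        subprocess.run(cmd, shell=True, check=True, env=env, timeout=timeout_s)
        return 'pass'
    except subprocess.TimeoutExpired:
        return 'timeout'
    except subprocess.CalledProcessError:
        return 'fail'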
+ subitem, kwargs)\n return call\n\n def __getattr__(self, item):\n if item not in VK_METHOD_GROUPS:\n raise AttributeError(item)\n return self._GroupWrapper(item, self)\n\n def _callMethod(self, method, kwargs):\n raise NotImplementedError\n\n\nclass DelayedManager(VkMethodDispatcher):\n def __init__(self, api, max_calls):\n self.api = api\n self.max_calls = max_calls\n self.queue = []\n self._lock = threading.Lock()\n\n\n def _callMethod(self, method, kwargs):\n call = DelayedCall(self, method, kwargs)\n old_queue = None\n with self._lock:\n self.queue.append(call)\n if len(self.queue) >= self.max_calls:\n old_queue = self.queue\n self.queue = []\n if old_queue:\n self._do_execute(old_queue)\n return call\n\n def _do_execute(self, methods):\n if len(methods) == 1:\n call = methods[0]\n response = self.api.apiCall(call.method, call.params)\n call._set_value(response)\n return\n query = ['return[']\n for num, i in enumerate(methods):\n query.append(self.api.encodeApiCall(i.method, i.params) + ',')\n query.append('];')\n query = ''.join(query)\n response = self.api.execute(query)\n errors = response.get('execute_errors', [])\n for call, r in zip(methods, response['response']):\n if r is False: # it's fine here\n error = errors.pop(0)\n if error['method'] != call.method:\n logger.error('Failed to match errors with methods. Response: ' + str(response))\n return\n if self.api.processError(call.method, call.params, {'error': error}):\n call.params['_retry'] = True\n self._callMethod(call.method, call.params)\n else:\n call._set_value(None)\n else:\n call._set_value(r)\n\n def sync(self):\n while True:\n with self._lock:\n if not self.queue:\n break\n old_queue = self.queue\n self.queue = []\n self._do_execute(old_queue)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.sync()\n\n\nclass LongpollMessage:\n def __init__(self, record):\n self.mid, self.flags, self.sender, self.ts, self.text, self.opt, self.extra = record\n\nclass VkError(Exception):\n pass\n\n\nclass RateLimiter:\n\n def __init__(self, interval):\n self.last_call = 0\n self.interval = interval\n self.lock = threading.RLock()\n\n def __enter__(self):\n self.lock.acquire()\n now = time.time()\n if self.last_call + self.interval > now:\n time.sleep(self.last_call + self.interval - now)\n now = time.time()\n self.last_call = now\n\n def __exit__(self, *args):\n self.lock.release()\n\n\nclass doc_types:\n TEXT_DOC = 1\n ARCHIVE = 2\n GIF = 3\n IMAGE = 4\n AUDIO = 5\n VIDEO = 6\n EBOOK = 7\n UNKNOWN = 8\n\n\nclass cached_property:\n\n def __init__(self, fun):\n self.fun = fun\n functools.update_wrapper(self, fun)\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n value = self.fun(instance)\n setattr(instance, self.fun.__name__, value)\n return value\n","repo_name":"kalinochkind/vkbot","sub_path":"vkapi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"} +{"seq_id":"72497921360","text":"import numpy\nfrom skimage.util import random_noise\n\nfrom aydin.io.datasets import camera, normalise, pollen\nfrom aydin.it.transforms.motion import (\n MotionStabilisationTransform,\n _find_shift,\n _measure_shifts,\n _shift_transform,\n)\n\n\ndef test_phase_correlation():\n a = camera()\n b = numpy.roll(camera(), shift=(30, 50), axis=tuple(range(a.ndim)))\n\n # phase_correlation = _phase_correlation(image=b, reference_image=a)\n\n shift, _ = _find_shift(b, a)\n\n # 
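The RateLimiter in the vkapi record above is a reentrant context manager that sleeps until at least `interval` seconds have passed since the previous call. Usage is simply wrapping each request; a sketch assuming the class above, with `fetch` standing in for whatever transport function is being throttled:

limiter = RateLimiter(interval=0.34)  # roughly three calls per second

def throttled_call(method, params):
    with limiter:  # blocks if the previous call was less than 0.34 s ago
        return fetch(method, params)  # hypothetical transport function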
print(shift)\n    assert (numpy.abs(shift - [30, 50]) < 0.1).all()\n\n\ndef test_measure_shifts_and_transform():\n    shifts = tuple((5 * i, int(0.5 * i * i)) for i in range(10))\n\n    # print('')\n    # pprint(shifts)\n\n    image = normalise(camera())\n    array = numpy.stack(\n        [numpy.roll(image, shift=shift, axis=(0, 1)) for shift in shifts]\n    )\n\n    measured_shifts, _ = _measure_shifts(array, reference_index=0)\n\n    # pprint(measured_shifts)\n\n    for s, ms in zip(shifts, measured_shifts):\n        assert (numpy.array(s) == numpy.array(ms)).all()\n\n    measured_shifts, _ = _measure_shifts(array, reference_index=0, center=False)\n    # def _shift_transform(array, shifts, pad, crop, pad_mode='wrap', inverse=False):\n    motion_corrected_array = _shift_transform(\n        array.copy(), -measured_shifts, pad=False, crop=False\n    )\n\n    assert (motion_corrected_array == numpy.stack([image for _ in shifts])).all()\n\n\ndef test_correct_uncorrect():\n    shifts = tuple((5 * i, int(0.5 * i * i)) for i in range(10))\n\n    # print('')\n    # pprint(shifts)\n\n    image = normalise(pollen())[0:256, 0:256]\n    array = numpy.stack(\n        [add_noise(numpy.roll(image, shift=shift, axis=(0, 1))) for shift in shifts]\n    )\n\n    mc = MotionStabilisationTransform(axes=0)\n\n    corrected_array = mc.preprocess(array.copy())\n    uncorrected_array = mc.postprocess(corrected_array.copy())\n\n    assert array.dtype == uncorrected_array.dtype\n    assert (array == uncorrected_array).all()\n\n\ndef add_noise(image, intensity=4, variance=0.4):\n    noisy = image\n    if intensity is not None:\n        noisy = numpy.random.poisson(image * intensity) / intensity\n    noisy = random_noise(noisy, mode=\"gaussian\", var=variance)\n    noisy = noisy.astype(numpy.float32)\n    return noisy\n","repo_name":"royerlab/aydin","sub_path":"aydin/it/transforms/test/test_motion.py","file_name":"test_motion.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"3"}
{"seq_id":"14356847989","text":"# coding:utf-8\n# Hands-on practice from \"Programming for the Puzzled\"\n# 7. Finding square roots\n\n\n# Linear-time algorithm\ndef findSquareRoot(n):\n    if n < 0:\n        print(\"Input must be a non-negative integer\")\n        return -1\n    i = 0\n    while i*i < n:\n        i += 1\n    if i*i == n:\n        return i\n    else:\n        print(n, \"is not a perfect square\")\n        return -1\n\n\n# Improved version: more precise answer, with a given tolerance and step size\ndef findSquareRoot2(n, eps, step):\n    if n < 0:\n        print(\"Input must be a non-negative integer\")\n        return -1, 0\n    numGuesses = 0.0\n    ans = 0.0\n    while n - ans**2 > eps:\n        ans += step\n        numGuesses += 1\n    if abs(n - ans**2) > eps:\n        # print(\"Failed to find the square root of\", n)\n        print(n, ans**2, n - ans**2, eps)\n        return -1, numGuesses\n    else:\n        print(\"b\")\n        # print(ans, \"is an approximate square root of\", n)\n        return ans, numGuesses\n \n \n# Bisection search\ndef bisectionSearchForSquareRoot(n, eps):\n    if n < 0:\n        print(\"Input must be a non-negative integer\")\n        return -1, 0\n    numGuesses = 0\n    low = 0.0\n    high = n\n    ans = (high + low)/2.0\n    while abs(ans**2 - n) >= eps:\n        if ans**2 < n:\n            low = ans\n        else:\n            high = ans\n        ans = (high + low)/2.0\n        numGuesses += 1\n    return ans, numGuesses\n \n \n# Linear search\nNOTFOUND = -1\nLs = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]\ndef Lsearch(L, value):\n    for i in range(len(L)):\n        if L[i] == value:\n            return i\n    return NOTFOUND\n \n \n# Binary search\ndef bsearch(L, value):\n    lo, hi = 0, len(L) - 1\n    length = hi\n    while lo <= hi:\n        mid = (lo+hi)//2\n        if L[mid] < value:\n            lo = mid + 1\n        elif value < L[mid]:\n            hi = mid - 1\n        else:\n            return mid\n        # Exercise 2\n        length = hi-lo\n        print(\"Current search interval length:\", length)\n    return NOTFOUND\n \n \n# Exercise 1. Improved bisection search\ndef bisectionSearchForSquareRoot2(n, eps):\n    if n < 0:\n        print(\"Input must be a non-negative integer\")\n        return -1, 0\n    numGuesses = 0\n    low = 0.0\n    # high = n\n    high = max(n, 1.0)\n    ans = (high + low)/2.0\n    while abs(ans**2 - n) >= eps:\n        if ans**2 < n:\n            low = ans\n        else:\n            high = ans\n        ans = (high + low)/2.0\n        numGuesses += 1\n        # print(low, high, ans, numGuesses, ans**2-n, eps)\n        # input(\"Press any key to continue\")\n    return ans, numGuesses\n \n \n# Exercise 3: find a root of the equation\ndef fun(x):\n    return x**3 + x**2 - 11\n \n \ndef findRoot(eps):\n    lo, hi = -10, 10\n    mid = (hi + lo)/2.0\n    count = 0\n    while abs(fun(mid)) > eps:\n        if fun(lo)*fun(mid) < 0:\n            hi = mid\n        elif fun(mid)*fun(hi) < 0:\n            lo = mid\n        mid = (hi + lo)/2.0\n        count += 1\n        # print(lo, mid, hi, count, abs(fun(mid)))\n        # input(\"Press any key to continue\")\n    return mid\n\n\nif __name__ == \"__main__\":\n    n = int(input(\"Enter a perfect square: \"))\n    res = findSquareRoot(n)\n    res2, numGuesses = findSquareRoot2(n, 0.01, 0.001)\n    res3, numGuesses3 = bisectionSearchForSquareRoot(n, 0.01)\n    if res != -1:\n        print(res,\"*\", res, \"=\", res**2)\n    else:\n        print(\"Invalid input.\")\n    if res2 != -1:\n        print(res2,\"*\", res2, \"=\", res2**2)\n        print(\"Number of guesses =\", numGuesses)\n    else:\n        print(\"Failed to solve.\")\n    if res3 != -1:\n        print(res3,\"*\", res3, \"=\", res3**2)\n        print(\"Number of guesses =\", numGuesses3)\n    else:\n        print(\"Failed to solve.\")\n    \n    print(\"Linear search:\", Lsearch(Ls, 59))\n    print(\"Binary search:\", bsearch(Ls, 59))\n    # Exercise 1\n    res4, numGuesses4 = bisectionSearchForSquareRoot2(0.25, 0.01)\n    print(res4)\n    # Exercise 3\n    print(findRoot(0.01))\n    \n    ","repo_name":"zwdnet/MyQuant","sub_path":"44/07/Sq.py","file_name":"Sq.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"3"}
{"seq_id":"23819641386","text":"# Python Program to multiply two numbers using Russian Peasant method\n\n# Function to multiply two numbers using Russian Peasant method\n\n\ndef russianPeasant(number1, number2):\n\n    result = 0\n\n    # While the second number doesn't\n    # become 0\n    while (number2 > 0):\n\n        # If second number becomes odd, add the first number to result\n        if (number2 & 1):\n            result = result + number1\n\n        # Double the first number and halve the second number\n        number1 = number1 << 1 # bitwise left shift operator\n        number2 = number2 >> 1 # bitwise right shift operator\n\n    return print(\"Result is\", result)\n\n# Driver code\nnumber1 = int(input(\"Enter 1st Number:\"))\nnumber2 = int(input(\"Enter 2nd Number:\"))\nrussianPeasant(number1, number2)\n\n'''\nSample I/O:\n    Input:\n        Enter 1st Number:5\n        Enter 2nd Number:5\n\n    Output:\n        Result is 25\n\nTime complexity: Θ(log n)\nSpace complexity: Θ(1)\n'''\n","repo_name":"HarshCasper/NeoAlgo","sub_path":"Python/other/Russian_Peasant_Algorithm.py","file_name":"Russian_Peasant_Algorithm.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":873,"dataset":"github-code","pt":"3"}
{"seq_id":"70824938641","text":"import os\nimport re\nimport sys\n\nimport IPython\nfrom rich.markdown import Markdown\nfrom rich.console import Console\n\n\nclass ShowTheSlide:\n    def __init__(self, presentation, function):\n        self.presentation = presentation\n        self.fn = function\n\n    def __repr__(self):\n        if self.fn(self.presentation) is not False:\n            return repr(self.presentation)\n        return \"\"\n\n\nclass Presentation:\n    def __init__(self, filename):\n        with open(filename) as file:\n            self.slides = re.split(r\"====+\", file.read())\n        self.index = {}\n        for i, slide in enumerate(self.slides):\n            for word in slide.split():\n                self.index[word.lower()] = i\n        self._slide_no = 0\n        self.console = Console()\n        
self.slides_shown = set()\n        self.flags = {\"exec\"}\n\n    @property\n    def slide_no(self):\n        return self._slide_no\n\n    @slide_no.setter\n    def slide_no(self, new_value):\n        if 0 <= new_value <= len(self.slides) - 1:\n            self._slide_no = new_value\n\n    def bind(presentation, fn):\n        return ShowTheSlide(presentation, fn)\n\n    def __repr__(self):\n        print(\"\\r\")\n        slide = self.slides[self.slide_no]\n        lines = []\n        set_local, unset_local = set(), set()\n        for line in slide.split(\"\\n\"):\n            if line.strip().startswith(\"!!\"):\n                if (\n                    \"alwaysexec\" in self.flags\n                    or self.slide_no not in self.slides_shown\n                ):\n                    os.system(line.strip()[2:])\n                continue\n            elif line.strip().startswith(\"//\"):\n                if \"PRACTICE\" in os.environ:\n                    line = f\"\\x1b[1;31m\\x1b[3m{line}\\x1b[0m\"\n                else:\n                    continue\n            elif line.strip().startswith(\"!import \"):\n                exec(line.strip(\"! \\n\"), globals())\n                continue\n            elif line.strip().startswith(\"!image \"):\n                os.system(f\"viu -t {line.removeprefix('!image ').strip()}\")\n                continue\n            elif line.strip().startswith(\"!printf \"):\n                # to work without needing alwaysexec\n                os.system(line.strip().lstrip(\"!\"))\n                continue\n            elif line.strip().startswith(\"!up \"):\n                how_much = int(line.removeprefix(\"!up \"))\n                os.system(f\"printf '\\e[{how_much}A'\")\n                continue\n            elif line.strip().startswith(\"!unset\"):\n                flag = line.strip().split()[1]\n                self.flags -= {flag}\n                if line.strip().startswith(\"!unsetlocal\"):\n                    unset_local.add(flag)\n                continue\n            elif line.strip().startswith(\"!set\"):\n                flag = line.strip().split()[1]\n                self.flags |= {flag}\n                if line.strip().startswith(\"!setlocal\"):\n                    set_local.add(flag)\n                continue\n            lines.append(line)\n        markdown = Markdown(\"\\n\".join(lines).strip())\n        for token in markdown.parsed:\n            if (\n                token.tag == \"code\"\n                and token.info.startswith(\"py\")\n                and \"exec\" in self.flags\n            ):\n                exec(token.content, globals())\n\n        self.console.print(markdown)\n        self.draw_slide_number()\n        self.slides_shown.add(self.slide_no)\n        self.flags |= unset_local\n        self.flags -= set_local\n        return \"\"\n\n    def draw_slide_number(self):\n        display = f\" {self.slide_no} \"\n        position = self.console.width - len(display)\n        print(f\"\\x1b[1A\\x1b[{position}G\\x1b[1;46;30m{display}\", end=\"\", flush=True)\n\n\nif len(sys.argv) != 2:\n    print(\"Usage: rtty path/to/slides.md\", file=sys.stderr)\n    os._exit(1)\n\nPRES = Presentation(sys.argv[1])\n\n\n@PRES.bind\ndef n(presentation):\n    presentation.slide_no += 1\n\n\n@PRES.bind\ndef p(presentation):\n    presentation.slide_no -= 1\n\n\n@PRES.bind\ndef g(presentation):\n    print(\"\\x1b[1Ago to slide: \", end=\"\", flush=True)\n    presentation.slide_no = int(input())\n\n\n@PRES.bind\ndef s(presentation):\n    print(\"\\x1b[1Asearch: \", end=\"\", flush=True)\n    query = input().lower()\n    for word, slide_no in presentation.index.items():\n        if query in word:\n            presentation.slide_no = slide_no\n            break\n    else:\n        print(\"[not found]\")\n        return False\n\n\n@PRES.bind\ndef d(presentation):\n    # redraw\n    pass\n\n\n@PRES.bind\ndef q(presentation):\n    os._exit(0)\n\n\nIPython.embed(colors=\"neutral\", history_load_length=0)\nos._exit(0) # prevent import from failing\n","repo_name":"L3viathan/representty","sub_path":"representty.py","file_name":"representty.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
{"seq_id":"40796301157","text":"\"\"\"\nThis module implements a simple sample-and-hold module.\nThis is not typically implemented like this in the hardware.\nHowever we can 
still use this for comparison.\n\"\"\"\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom rockpool.devices.xylo.syns63300.imuif.utils import type_check\nfrom rockpool.nn.modules.module import Module\nfrom rockpool.parameters import SimulationParameter\n\n__all__ = [\"SampleAndHold\"]\n\n\nclass SampleAndHold(Module):\n \"\"\"\n Samples and holds a signal in the time dimension (BxTxC)\n \"\"\"\n\n def __init__(\n self,\n shape: Optional[Union[Tuple, int]] = (3, 3),\n sampling_period: int = 10,\n ) -> None:\n \"\"\"Object Constructor\n\n Args:\n shape (Optional[Union[Tuple, int]], optional): The number of input and output channels. Defaults to ``(3, 3)``.\n sampling_period (int): Sampling period that the signal is sampled and held. Defaults to ``10``.\n \"\"\"\n super().__init__(shape=shape, spiking_input=False, spiking_output=False)\n\n self.sampling_period = SimulationParameter(\n sampling_period, shape=(1,), cast_fn=int\n )\n \"\"\"(int) Sampling period that the signal is sampled and held\"\"\"\n\n @type_check\n def evolve(\n self, input_data: np.ndarray, record: bool = False\n ) -> Tuple[np.ndarray, Dict, Dict]:\n \"\"\"Operate always along the time axes\n\n Args:\n input_data (np.ndarray): input signal of shape ``BxTxC`` where T is the time-dimension along which sample-and-hold is done. ``(B, T, C)``\n record (bool, optional): Unused.\n\n Returns:\n Tuple[np.ndarray, Dict, Dict]:\n out_data: the python-object quantized version of the input signal.\n state_dict: empty dictionary.\n record_dict: empty dictionary.\n \"\"\"\n # BxTxC\n input_data, _ = self._auto_batch(input_data)\n input_data = np.array(input_data, dtype=np.int64)\n __B, __T, __C = input_data.shape\n\n # Generate the output data\n out_data = np.zeros_like(input_data)\n num_periods = int(np.ceil(__T / self.sampling_period))\n\n for period in range(num_periods):\n start_idx = period * self.sampling_period\n\n end_idx = (period + 1) * self.sampling_period\n end_idx = end_idx if end_idx <= __T else __T\n\n # copy and repeat the signal along the time dimension\n out_data[:, start_idx:end_idx, :] = np.repeat(\n input_data[:, start_idx, np.newaxis, :], end_idx - start_idx, axis=1\n )\n\n out_data = np.array(out_data, dtype=object)\n return out_data, {}, {}\n","repo_name":"synsense/rockpool","sub_path":"rockpool/devices/xylo/syns63300/imuif/rotation/sample_hold.py","file_name":"sample_hold.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"3"} +{"seq_id":"42587894960","text":"import cv2\nimport sys\nimport os\n\n\ndef detect_faces(f_cascade, colored_img, scaleFactor=1.1):\n img_copy = colored_img.copy() # create a copy of the image\n gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY) # convert image to grey scale for opencv\n faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5) # detect multiscale: some faces can be closer\n print('Faces found: ', len(faces)) # print faces found\n for (x, y, w, h) in faces:\n cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2) # draw rectangles on original coloured img\n return img_copy\n\n\ndef main():\n input_file = sys.argv[1] # input file passed ad argument\n name, ext = os.path.splitext(input_file)\n output_file = name + '_ocv' + ext # create the name of the output file\n test = cv2.imread(input_file) # open the input file\n print('img loaded')\n haar_face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml') # load the 
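The evolve() loop above copies the first sample of each period across the rest of the period. For a single (T, C) signal the same sample-and-hold can be written without an explicit loop by indexing with each period's start position; this sketch is a standalone illustration, not part of the Rockpool API:

import numpy as np

def sample_and_hold(x: np.ndarray, period: int) -> np.ndarray:
    # Every sample is replaced by the value at the start of its period.
    starts = (np.arange(x.shape[0]) // period) * period
    return x[starts]

x = np.arange(12).reshape(6, 2)
print(sample_and_hold(x, 3))  # rows 0-2 repeat row 0, rows 3-5 repeat row 3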
cascade classifier training file\n    print('classifier loaded')\n    faces_detected_img = detect_faces(haar_face_cascade, test)\n    cv2.imwrite(output_file, faces_detected_img) # save the image\n    print('file saved')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"arduino/arduino-edge-container-demo","sub_path":"fifth_iteration/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"}
{"seq_id":"7378250926","text":"import re\nimport asyncio\nfrom pathlib import Path\nfrom itertools import product\nfrom textobjects import textobjects\nfrom collections.abc import MutableSequence\nfrom abc import ABC, abstractmethod\nfrom watchdog import events, observers\n\nclass TextObjectObserver(ABC):\n    def on_textobject_removed(self, textobject, typ, path):\n        pass\n\n    def on_textobject_moved(self, textobject, previous_span, typ, path):\n        pass\n\n    def on_textobject_added(self, textobject, typ, path):\n        pass\n\nclass TextObjectStorage(MutableSequence, events.FileSystemEventHandler):\n    \"\"\"Persistent storage of :class:`textobjects.TextObject` subclasses\n    abstracted as a mutable sequence\n\n    Attributes:\n        txtobjtypes (List[textobjects.TextObject]): The TextObject subclasses which\n            will be stored \n\n        primaryfile (str): the path to the primary storage file. Entries will be \n            added to this file when a new item is added.\n\n        files (List[str]): the paths to any storage files. All occurrences of \n            the :obj:`txtobjtypes` in these files will show up in the sequence.\n            When a textobject is updated the occurrence of it in its respective file \n            will be replaced.\n    \"\"\"\n\n    def __init__(self, txtobjtypes, primaryfile=None, files=[]):\n        self.txtobjtypes = txtobjtypes\n        self.primaryfile = Path(primaryfile)\n        self.files = [Path(f) for f in files]\n        if self.primaryfile not in self.files:\n            self.files.append(self.primaryfile)\n        self._entries = None\n        self.observers = []\n\n    def entries(self, updated=False):\n        if updated or not self._entries:\n            self.update()\n        return self._entries\n\n    def __len__(self):\n        return len(self.entries())\n    \n    def __getitem__(self, key):\n        if isinstance(key, int):\n            return list(self.entries())[key]\n        else:\n            return list(self.entries().keys())[key]\n\n    def __setitem__(self, key, value):\n        if True not in [isinstance(value, typ) for typ in self.txtobjtypes]:\n            changed = False\n            for typ in self.txtobjtypes:\n                try:\n                    value = typ(text=value)\n                    changed = True\n                except:\n                    pass\n            if not changed:\n                raise ValueError(f'{value} is not in a supported format')\n        obj = self[key]\n        (typ, path) = self.entries()[obj]\n        start, end = obj.span\n        text = path.read_text()\n        text = text[:start] + value + text[end:]\n        path.write_text(text)\n        self.update()\n    \n    def __delitem__(self, key):\n        obj = self[key]\n        typ, path = self.entries()[obj]\n        start, end = obj.span\n        text = path.read_text()\n        text = text[:start] + text[end:].lstrip('\\n')\n        path.write_text(text)\n        self.update()\n\n    def __str__(self):\n        return str(self.entries())\n\n    def insert(self, index, item):\n        if True not in [isinstance(item, typ) for typ in self.txtobjtypes]:\n            changed = False\n            for typ in self.txtobjtypes:\n                try:\n                    item = typ(text=item)\n                    changed = True\n                except:\n                    pass\n            if not changed:\n                raise ValueError(f'{item} is not in a supported format')\n        if index < len(self):\n            obj = self[index]\n            (typ, path) = self.entries()[obj]\n            start, end = obj.span\n            text = path.read_text()\n            text = text[:end] + item + text[end:]\n            
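One note on the face-detection script above: it expects the cascade XML next to the script. When OpenCV comes from the opencv-python wheel, the bundled cascades can instead be located through cv2.data (a sketch, assuming that wheel is installed):

import cv2

cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)
assert not face_cascade.empty(), 'cascade failed to load'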
path.write_text(text)\n        elif index == len(self):\n            with self.primaryfile.open('a') as pf:\n                pf.write(str(item).strip('\\n') + '\\n')\n        else:\n            raise IndexError(f'index must not exceed len() {len(self)}')\n        self.update()\n\n    def __iter__(self):\n        return iter(self.entries().keys())\n\n    def __contains__(self, other):\n        return other in self.entries()\n    \n    def __reversed__(self):\n        return reversed(self.entries())\n\n    def subscribe(self, observer: TextObjectObserver):\n        self.observers.append(observer)\n\n    def on_modified(self, event):\n        if Path(event.src_path).name in [p.name for p in self.files]:\n            self.update()\n\n    def update(self):\n        old = self._entries\n        self._entries = {obj: (typ, p) for (p, typ) in product(self.files, self.txtobjtypes) \n                for obj in typ.findall(p.read_text())}\n        self.__determine_changes(old, self._entries)\n\n    def __determine_changes(self, old, new):\n        added = []\n        removed = set()\n        if old is None:\n            added = new.keys()\n        else:\n            newset, oldset = set(new), set(old)\n            added += newset - oldset\n            removed = oldset - newset\n\n            for obj1, obj2 in product(oldset, newset):\n                if obj1 == obj2 and obj1.span != obj2.span:\n                    for obs in self.observers:\n                        obs.on_textobject_moved(obj2, obj1.span, *new[obj2])\n\n        for txtobj in removed:\n            for obs in self.observers:\n                obs.on_textobject_removed(txtobj, *old[txtobj])\n\n        for txtobj in added:\n            for obs in self.observers:\n                obs.on_textobject_added(txtobj, *new[txtobj])\n\nclass TextObjectDirectoryTree(TextObjectStorage):\n    \"\"\"A TextObjectStorage spanning a directory structure\n\n    Args:\n        txtobjtypes (List[TextObject]): The TextObject subclasses which will be considered\n        writefile (str): The path to the file which newly added items will be written to.\n            it can be any file and entries will be placed at the end of the file. If the file\n            does not exist a new one will be created\n        root (str): Path to the root directory \n        glob (str): the glob pattern to look for within the root directory \n        recursive (bool): if true subdirectories will be considered recursively, equivalent to \n            prepending **/ to the glob\n\n    \"\"\"\n    def __init__(self, txtobjtypes, writefile, root, glob, recursive=False):\n        self.txtobjtypes = txtobjtypes\n        self.primaryfile = Path(writefile)\n        if not self.primaryfile.exists():\n            self.primaryfile.touch()\n        self.root = Path(root)\n        if recursive:\n            files = self.root.rglob(glob)\n        else:\n            files = self.root.glob(glob)\n        self.files = list(files)\n\n        if self.primaryfile not in self.files:\n            self.files.append(self.primaryfile)\n\n        self._entries = None\n        self.observers = []\n        self.update()\n\nclass TextObjectStorageSyncronization:\n    \"\"\"Context manager which updates a :obj:`TextObjectStorage`\n    each time any of the underlying files are changed\"\"\"\n\n    def __init__(self, *textobjectstores):\n        self.textobjectstores = textobjectstores\n\n    def __enter__(self):\n        self.stopfunc = watch(*self.textobjectstores)\n\n    def __exit__(self, type, value, traceback):\n        self.stopfunc()\n\ndef sync(*textobjectstorage: TextObjectStorage):\n    \"\"\"create a Context Manager which handles synchronization\"\"\"\n    return TextObjectStorageSyncronization(*textobjectstorage)\n\ndef watch(*textobjectstores: TextObjectStorage):\n    \"\"\"start a co-routine to watch the files associated to \n    the :obj:`textobjectstores` and update the TextObjectStorage instances\n    when the files are modified\n\n    Args:\n        *textobjectstores (TextObjectStorage): the TextObjectStorage instances to be updated\n\n    Returns:\n        (Callable) a function which stops the coroutine\n    \"\"\"\n    return asyncio.run(asyncwatch(*textobjectstores))\n\nasync def asyncwatch(*textobjectstores: TextObjectStorage):\n    obs = observers.Observer()\n    for st, path in [(st, p) for st in textobjectstores for p in st.files]:\n        obs.schedule(st, str(path.parent), recursive=False)\n    async def _watch():\n        obs.start()\n        while obs.is_alive:\n            await asyncio.sleep(0.5)\n    asyncio.create_task(_watch())\n    return lambda: obs.stop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"BlakeASmith/textobjects","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":8208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"21175470828","text":"from rest_framework import serializers\nfrom projects.models import Project, Tag, Review\nfrom users.models import Profile\n\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Profile\n        fields = '__all__'\n\n\n\nclass TagSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Tag\n        fields = '__all__'\n    \n    \n\nclass ReviewSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Review\n        fields = '__all__'\n\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n    owner = ProfileSerializer(many=False)\n    #* Now, instead of the 'owner' field of the model 'Project' returning a profile ID, it will return the entire profile object\n    tags = TagSerializer(many=True)\n    #* Just did the same thing for tags. 
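The storage module above drives its updates from watchdog file-system events. Reduced to its essentials, the underlying watchdog pattern is a handler subclass scheduled on an observer thread:

import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintChanges(FileSystemEventHandler):
    def on_modified(self, event):
        print('modified:', event.src_path)

observer = Observer()
observer.schedule(PrintChanges(), '.', recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()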
And many is set to True, as there can be multiple tags\n \n reviews = serializers.SerializerMethodField()\n \n class Meta:\n model = Project\n fields = '__all__'\n \n def get_reviews(self, obj): # 'self' here doesn't refer to the 'model project', but instead it refers to the 'SerializerMethodField class'\n reviews = obj.review_set.all()\n serializer = ReviewSerializer(reviews, many=True)\n \n return serializer.data","repo_name":"AryanGodara/DevSearch_Django_WebApp","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"33640223354","text":"import pandas as pd\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom xgboost import XGBClassifier\n\nfrom src.model import Framework\nfrom src.preprocess import Preprocessing\n\nif __name__ == \"__main__\":\n \"\"\"\n Main file to train and/or predict\n \"\"\"\n\n # Parameters\n RANDOM_STATE = 42\n METHOD = \"undersampling\"\n FILE = \"./data/creditcard.csv\"\n SAVED_DIR = \"./pretrained\"\n\n # 1) Training\n\n # Load & process data\n df = pd.read_csv(FILE)\n Processing = Preprocessing().fit(df, columns_name=[\"Time\", \"Amount\"])\n\n # Apply sampling\n Xtrain, Xtest, ytrain, ytest = Processing.get_sample(\n method=METHOD, t_size=0.3, random_state=RANDOM_STATE\n )\n\n # Train & save models\n my_models = {\n \"Logistic_regression\": LogisticRegression(random_state=RANDOM_STATE),\n \"Naive_Bayes\": GaussianNB(),\n \"XGBoost\": XGBClassifier(),\n }\n learners = Framework(models=my_models).fit(Xtrain, ytrain, SAVED_DIR)\n\n # Get performance score\n scores = learners.get_scores(Xtest, ytest)\n print(pd.DataFrame({\"model_name\": scores[0], \"AUC\": scores[1]}))\n\n # 2) Inference\n\n # Predict\n # yhat = Framework.predict(Xtest, \"./pretrained/Logistic_regression.pkl\")\n","repo_name":"Jo-dsa/FraudDetection","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24523232235","text":"from sklearn.cluster import KMeans\nfrom PIL import Image\nimport matplotlib.pyplot as mat\nimport plotly.plotly as py\n\n\nclass KMeansRunner:\n\n def __init__(self, df, numberOfClusters, numberOfRuns):\n self.df=df\n self.numberOfClusters = numberOfClusters\n self.numberOfRuns = numberOfRuns\n\n # run kmeans algorithm\n kmeansRes = KMeans(n_clusters=numberOfClusters, n_init=numberOfRuns).fit(self.df)\n\n # update df\n pred = kmeansRes.predict(self.df)\n self.df['ClusterNumber'] = pred\n\n self.scatter()\n self.horopleth()\n\n def scatter(self):\n #scatter: x=social support, y=generosity, color by kmeans result\n self.scatter = mat.scatter(x=self.df['Social support'], y=self.df['Generosity'], c=self.df['ClusterNumber'])\n mat.colorbar(self.scatter)\n mat.xlabel(\"Social support\")\n mat.ylabel(\"Generosity\")\n mat.title(\"K-Means Clustering\")\n mat.savefig('scatter.png')\n\n def horopleth(self):\n scl = [[0.0, 'rgb(242,240,247)'], [0.2, 'rgb(218,218,235)'], [0.4, 'rgb(188,189,220)'],\n [0.6, 'rgb(158,154,200)'], [0.8, 'rgb(117,107,177)'], [1.0, 'rgb(84,39,143)']]\n\n data = [dict(\n type='choropleth',\n colorscale=scl,\n autocolorscale=False,\n locations=self.df.axes[0].tolist(),\n z=self.df['ClusterNumber'],\n locationmode='country names',\n text=self.df.axes[0].tolist(),\n marker=dict(\n line=dict(\n color='rgb(255,255,255)',\n 
width=2\n                )\n            ),\n            colorbar=dict(\n                title=\"Cluster\"\n            )\n        )]\n\n        layout = dict(\n            title='K-Means Clustering Visualization',\n            geo=dict(\n                scope='Cluster Group',\n                projection=dict(type='Mercator'),\n                showlakes=True,\n                lakecolor='rgb(255, 255, 255)',\n            ),\n        )\n        py.sign_in(\"talshemt\", \"zgAzXUHXrBOenhAwSiDz\")\n        fig = dict(data=data, layout=layout)\n        py.plot(fig, validate=False, filename='horopleth', auto_open=False)\n        py.image.save_as(fig, filename='horopleth.png')\n","repo_name":"avivatal/KMeansClustering","sub_path":"venv/KMeansRunner.py","file_name":"KMeansRunner.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"41219465226","text":"\"\"\"\nGiven a binary tree, determine if it is height-balanced.\n\nExample:\nInput: root = [3,9,20,null,null,15,7]\nOutput: true\n\"\"\"\nclass TreeNode(object):\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\nclass Solution:\n    def balancedBinaryTree(self, root):\n        def height(node: TreeNode) -> int:\n            # An empty node has height 0\n            if not node:\n                return 0\n            # Recursively compute the heights of the left and right subtrees;\n            # the current node's height is the larger of the two plus 1\n            return max(height(node.left), height(node.right)) + 1\n\n        # An empty root means an empty tree, which counts as balanced\n        if not root:\n            return True\n\n        # Check that the height difference between the left and right subtrees\n        # is at most 1, and recursively check that both subtrees are balanced too\n        return (\n            abs(height(root.left) - height(root.right)) <= 1 and\n            self.balancedBinaryTree(root.left) and\n            self.balancedBinaryTree(root.right)\n        )","repo_name":"BrianQJN/Daily-Leecode-Prac","sub_path":"Trees/110_Balance_Binary_Tree.py","file_name":"110_Balance_Binary_Tree.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"21861722106","text":"import datetime\n\nfrom src.backend.data.fiscaldata_treasury_gov.treasury_api import TreasuryAPI\n\n\nclass HistoricalDebtOutstanding(TreasuryAPI):\n\n    \"\"\"\n    A class to interact with Historical Debt Outstanding.\n\n    Ref: https://fiscaldata.treasury.gov/datasets/historical-debt-outstanding/historical-debt-outstanding\n\n    available_fields = ['record_date',\n                        'debt_outstanding_amt',\n                        'src_line_nbr',\n                        'record_fiscal_year',\n                        'record_fiscal_quarter']\n\n    Updated: Annually\n\n    \"\"\"\n\n    def __init__(self):\n\n        _default_fields = ['record_date',\n                           'debt_outstanding_amt']\n\n        _end_point = 'v2/accounting/od/debt_outstanding'\n\n        super().__init__(endpoint=_end_point,\n                         default_fields=_default_fields)\n\n\nif __name__ == '__main__':\n    hist_debt = HistoricalDebtOutstanding()\n\n    start_date = datetime.datetime(year=2017, month=1, day=1)\n    end_date = datetime.datetime(year=2022, month=9, day=1)\n\n    data = hist_debt.get_all_data_between_dates(start_date=start_date, end_date=end_date)\n    ","repo_name":"JordanYeomans/macro_economics","sub_path":"src/backend/data/fiscaldata_treasury_gov/historical_debt_outstanding.py","file_name":"historical_debt_outstanding.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"27700498999","text":"import os\nimport logging\nimport idena.emoji as emo\nimport idena.constants as con\n\nfrom telegram import ParseMode\nfrom idena.plugin import IdenaPlugin\nfrom MyQR import myqr\n\n\nclass Deposit(IdenaPlugin):\n\n    QR_DIR = \"qr_codes\"\n    LOGO = \"idena.png\"\n\n    @IdenaPlugin.owner\n    @IdenaPlugin.threaded\n    @IdenaPlugin.send_typing\n    def execute(self, bot, update, args):\n        user_id = 
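The balanced-tree check above recomputes height at every node, which is O(n^2) on degenerate trees. A standard bottom-up reformulation (not the author's code) visits each node once and propagates a -1 sentinel as soon as any subtree is unbalanced:

def is_balanced(root) -> bool:
    def height(node):
        # Returns the subtree height, or -1 once any subtree is unbalanced.
        if not node:
            return 0
        left = height(node.left)
        if left < 0:
            return -1
        right = height(node.right)
        if right < 0 or abs(left - right) > 1:
            return -1
        return max(left, right) + 1
    return height(root) >= 0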
update.effective_user.id\n\n # Create directory for qr-code images\n qr_dir = os.path.join(self.get_plg_path(), self.QR_DIR)\n os.makedirs(qr_dir, exist_ok=True)\n\n # Get file and path of qr-code image\n qr_name = f\"{user_id}.png\"\n qr_code = os.path.join(qr_dir, qr_name)\n\n logo = os.path.join(self.get_plg_path(), con.DIR_RES, self.LOGO)\n\n address = self.api().address()\n\n if \"error\" in address:\n error = address[\"error\"][\"message\"]\n msg = f\"{emo.ERROR} Couldn't retrieve address: {error}\"\n update.message.reply_text(msg, parse_mode=ParseMode.MARKDOWN)\n logging.error(msg)\n return\n\n address = address[\"result\"]\n\n myqr.run(\n address,\n version=1,\n level='H',\n picture=logo,\n colorized=True,\n contrast=1.0,\n brightness=1.0,\n save_name=qr_name,\n save_dir=qr_dir)\n\n with open(qr_code, \"rb\") as qr_pic:\n update.message.reply_photo(\n photo=qr_pic,\n caption=f\"`{address}`\",\n parse_mode=ParseMode.MARKDOWN)\n","repo_name":"Endogen/idena-tg-wallet","sub_path":"idena/plugins/deposit/deposit.py","file_name":"deposit.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"32493102447","text":"\nimport pandas as pd\n\ndf = pd.read_csv(r\"..\\data\\processed\\peliculas_analisis_ML.csv\",index_col=0)\n\nX = df.drop('puntuacion', axis=1)\ny = df['puntuacion']\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.15,\n random_state=42)\n\nfrom catboost import CatBoostRegressor\n\nseed = 42\n\nCatBoost_Model = CatBoostRegressor(random_state= seed, verbose = False, learning_rate= 0.1, max_depth= 8, n_estimators= 300)\n\nfrom sklearn.ensemble import BaggingRegressor\n\nBagging_Model = BaggingRegressor(estimator=CatBoost_Model, n_estimators=10, random_state=seed)\nBagging_Model.fit(X_train, y_train)\n\nimport pickle\n\n# Save the trained model\nwith open(r'new_model.pkl', 'wb') as file:\n pickle.dump(Bagging_Model, file)","repo_name":"rupeibe/Machine-Learning_Films-Project","sub_path":"model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2561727998","text":"#!/usr/bin/env python3\nimport tkinter\nimport math\nimport random\nimport sys\n\nif len(sys.argv) != 2:\n print(\"Give the number of plot points in the first argument.\")\n print(\"Example: ./sierpinski_gasket.py 10000\")\n exit()\n\nnumber_of_points = int(sys.argv[1])\n\ndef plot_point(coordinate):\n x = coordinate[0]\n y = coordinate[1]\n canvas.create_oval(x-0.5, y-0.5, x+0.5, y+0.5, fill=\"black\")\n\nroot = tkinter.Tk()\nroot.title(\"Test 1\")\nroot.geometry(\"1000x1000\")\n\ncanvas = tkinter.Canvas(root, bg = \"white\")\ncanvas.pack(fill = tkinter.BOTH, expand = True)\n\n# vertices of the outmost triangle\nvertices = ((100, 900), (900, 900), (500, 900-800/2*math.sqrt(3)))\n\nfor coordinate in vertices:\n plot_point(coordinate)\n\ncurrent_point = (600, 800)\nfor i in range(number_of_points):\n vertex = vertices[int(3*random.random())]\n current_point = ((current_point[0] + vertex[0])/2, (current_point[1] + vertex[1])/2)\n 
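The movie-rating training script above ends by pickling the fitted bagging ensemble to new_model.pkl. Inference is the mirror image; a sketch reusing the script's own paths and column names:

import pickle
import pandas as pd

with open('new_model.pkl', 'rb') as file:  # written by pickle.dump above
    model = pickle.load(file)

X_new = pd.read_csv(r"..\data\processed\peliculas_analisis_ML.csv", index_col=0).drop('puntuacion', axis=1)
yhat = model.predict(X_new)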
plot_point(current_point)\n\nroot.mainloop()\n","repo_name":"rasshai/sierpinski_gasket","sub_path":"sierpinski_gasket.py","file_name":"sierpinski_gasket.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40569205982","text":"import functools\nimport typing\n\nimport numpy as np\nfrom jaxtyping import Float\nfrom maze_dataset import SPECIAL_TOKENS\n\n\ndef get_token_first_index(search_token: str, token_list: list[str]) -> int:\n return token_list.index(search_token)\n\n\nTaskSetup = typing.NamedTuple(\n \"TaskSetup\",\n [\n (\"prompts\", list[list[str]]),\n (\"targets\", str),\n ],\n)\n\n\nclass DLAProtocol(typing.Protocol):\n \"\"\"should take a dataset's tokens, and return a tuple of (prompts, targets)\"\"\"\n\n def __call__(self, dataset_tokens: list[list[str]], **kwargs) -> TaskSetup:\n ...\n\n\nclass DLAProtocolFixed(typing.Protocol):\n \"\"\"should take a dataset's tokens, and return a tuple of (prompts, targets)\n\n this variant signifies it's ready to be used -- no keyword arguments are needed\n \"\"\"\n\n def __call__(self, dataset_tokens: list[list[str]]) -> TaskSetup:\n ...\n\n\ndef token_after_fixed_start_token(\n dataset_tokens: list[list[str]],\n start_token: str = SPECIAL_TOKENS.PATH_START,\n offset: int = 1,\n) -> TaskSetup:\n \"\"\"in this task, we simply predict the token after `start_token`\n\n # Parameters:\n - `dataset_tokens : list[list[str]]`\n list of string-lists\n - `start_token : str`\n token to look for\n (defaults to `SPECIAL_TOKENS.PATH_START`)\n - `offset : int`\n which token to predict:\n 1: the token after `start_token`, given everything up to and including `start_token`\n 0: the token at `start_token`, given everything up to and **not** including `start_token`\n (defaults to `1`)\n\n # Returns:\n - `TaskSetup`\n tuple of (prompts, targets)\n \"\"\"\n\n prompts: list[list[str]] = list()\n targets: list[str] = list()\n\n for maze_tokens in dataset_tokens:\n path_start_idx: int = get_token_first_index(start_token, maze_tokens)\n prompt_tokens: list[str] = maze_tokens[: path_start_idx + offset]\n prompts.append(prompt_tokens)\n targets.append(maze_tokens[path_start_idx + offset])\n\n return TaskSetup(prompts=prompts, targets=targets)\n\n\ndef rand_token_in_range(\n dataset_tokens: list[list[str]],\n start_token: str = SPECIAL_TOKENS.PATH_START,\n end_token: str = SPECIAL_TOKENS.PATH_END,\n start_offset: int = 1,\n end_offset: int = -1,\n) -> TaskSetup:\n \"\"\"predict some random token between (non-inclusive) `start_token` and `end_token`\"\"\"\n n_samples: int = len(dataset_tokens)\n\n prompts: list[list[str]] = list()\n targets: list[str] = list()\n positions_p: Float[np.ndarray, \"n_samples\"] = np.random.uniform(size=(n_samples,))\n\n for i, sample_tokens in enumerate(dataset_tokens):\n start_idx: int = (\n get_token_first_index(start_token, sample_tokens) + start_offset\n )\n end_idx: int = get_token_first_index(end_token, sample_tokens) + end_offset\n\n selected_token_idx: int\n if start_idx < end_idx:\n selected_token_idx = int(positions_p[i] * (end_idx - start_idx) + start_idx)\n else:\n selected_token_idx = start_idx\n\n prompts.append(sample_tokens[:selected_token_idx])\n targets.append(sample_tokens[selected_token_idx])\n\n return TaskSetup(prompts=prompts, targets=targets)\n\n\nLOGIT_ATTRIB_TASKS: dict[str, DLAProtocolFixed] = {\n \"path_start\": functools.partial(\n token_after_fixed_start_token, start_token=SPECIAL_TOKENS.PATH_START, offset=0\n 
),\n    \"origin_after_path_start\": functools.partial(\n        token_after_fixed_start_token, start_token=SPECIAL_TOKENS.PATH_START, offset=1\n    ),\n    \"first_path_choice\": functools.partial(\n        token_after_fixed_start_token, start_token=SPECIAL_TOKENS.PATH_START, offset=2\n    ),\n    \"path_end\": functools.partial(\n        token_after_fixed_start_token, start_token=SPECIAL_TOKENS.PATH_END, offset=0\n    ),\n    \"final_before_path_end\": functools.partial(\n        token_after_fixed_start_token, start_token=SPECIAL_TOKENS.PATH_END, offset=-1\n    ),\n    \"rand_path_token\": functools.partial(\n        rand_token_in_range,\n        start_token=SPECIAL_TOKENS.PATH_START,\n        end_token=SPECIAL_TOKENS.PATH_END,\n        start_offset=1,\n        end_offset=-1,\n    ),\n    \"rand_path_token_non_endpoint\": functools.partial(\n        rand_token_in_range,\n        start_token=SPECIAL_TOKENS.PATH_START,\n        end_token=SPECIAL_TOKENS.PATH_END,\n        start_offset=3,\n        end_offset=-2,\n    ),\n}\n","repo_name":"AISC-understanding-search/maze-transformer","sub_path":"maze_transformer/mechinterp/logit_attrib_task.py","file_name":"logit_attrib_task.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"}
{"seq_id":"25929846764","text":"import math\n\n\ndef square(side):\n    \"\"\"\n    Find the perimeter, area and diagonal of the square\n    \"\"\"\n    perimeter = 4 * side\n    area = side ** 2\n    diagonal = side * math.sqrt(2)\n    return (perimeter, area, diagonal)\n\nprint(square(3))\n","repo_name":"Tetiana-Kulyk/Homeworks","sub_path":"HW_6/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"72196117841","text":"\"\"\"\r\nCULC Breakout Rooms:\r\n(5) 4-person room\r\n(9) 6-person room\r\n(1) 7-person room\r\n(1) 8-person room\r\n(1) 9-person room\r\n(2) 10-person room\r\n(3) 12-person room\r\n\"\"\"\r\n\r\nimport random\r\nimport networkx as nx\r\nimport matplotlib.pylab as plt\r\n\r\ndef generate_groups():\r\n    #40 groups generated with random group sizes and times\r\n    possible_hours = list(range(1, 25))\r\n    possible_group_sizes = [4, 6, 7, 8, 9, 10, 12]\r\n\r\n    with open('readme.txt', 'w') as f:\r\n        for i in possible_hours[::2]:\r\n            group_sizes = random.sample(possible_group_sizes, 3)\r\n            for x in group_sizes:\r\n                f.write(str(i) + \" \" + str(x) + \"\\n\")\r\n\r\ngenerate_groups()\r\ninputted_group_size = int(input(\"Enter the size of your study group: \"))\r\ninputted_desired_time = int(input(\"Enter the start time of your reservation (you will have a 2 hour block). Use 24 hour clock: \"))\r\n\r\nG = nx.DiGraph()\r\nG.add_nodes_from([1, 2, 3, 4])\r\nG.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 1), (1, 3)])\r\nfor u, v, d in G.edges(data=True):\r\n    d['weight'] = random.random()\r\nedges, weights = zip(*nx.get_edge_attributes(G, 'weight').items())\r\nnx.draw(G, node_color='b', edge_color=weights, width=2, with_labels=True)\r\n\r\nplt.show()\r\n\r\n\r\n#return options for rooms\r\n#if no options, ask to restart and select new time\r\n","repo_name":"akjadhav/Math3012Project","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"42812771049","text":"from typing import List\n\n\nclass Solution:\n    def maxChunksToSorted(self, arr: List[int]) -> int:\n        N = len(arr)\n        leftMax = [0] * N\n        rightMin = [0] * N\n        ma = float('-inf')\n        for i in range(N):\n            ma = max(ma, arr[i])\n            leftMax[i] = ma\n        mi = float('inf')\n        for i in range(N-1, -1, -1):\n            mi = min(mi, arr[i])\n            rightMin[i] = mi\n        res = 0\n        for i in range(N-1):\n            if leftMax[i] <= rightMin[i+1]:\n                res += 1\n        return res + 1\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    result = s.maxChunksToSorted([2, 1, 3, 4, 4])\n    print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Hard/768. Max Chunks To Make Sorted II/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"31996485405","text":"############################\n#Author: Maanus Gulia\n#email: mgulia@purdue.edu\n#ID: ee364a15\n#Date: 1/17/2019\n############################\n\nimport os # List of module import statements\nimport sys # Each one on a line\nimport re\n\n#Module level Variables. 
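The chunking solution above cuts the array wherever everything to the left is no larger than everything to the right, using a prefix maximum and a suffix minimum. Tracing its own example input makes the rule concrete:

# arr      = [2, 1, 3, 4, 4]
# leftMax  = [2, 2, 3, 4, 4]   (running maximum from the left)
# rightMin = [1, 1, 3, 4, 4]   (running minimum from the right)
# cuts where leftMax[i] <= rightMin[i+1]: after i = 1, 2, 3  ->  4 chunks
assert Solution().maxChunksToSorted([2, 1, 3, 4, 4]) == 4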
(Write this statement verbatim.)\n######################################################\nDataPath = os.path.expanduser(\"~ee364/DataFolder/Lab13\")\n\nfrom measurement import calculateDistance\n\n\ndef getTuple(input):\n    out = (0, 0)\n    var = 0\n    filePath = os.path.join(DataPath, \"coordinates.dat\")\n    with open(filePath, 'r') as FILE:\n        dataFile = FILE.readlines()\n    group = []\n    for line in dataFile:\n        temp = line.split(\",\")\n        group.append(temp)\n    for i in group:\n        temp = '\"' + input + '\"'\n        if i[0] == temp:\n            var = (float(i[2][2:-1]), float(i[3][2:-1]))\n\n    if var == 0:\n        return out\n    else:\n        return var\n\n\ndef getCost(sourceZip, destinationZip):\n\n    part1 = getTuple(sourceZip)\n    part2 = getTuple(destinationZip)\n\n    output = calculateDistance(part1, part2)\n    output *= .01\n\n    output = float(format(output, '.2f'))\n\n\n    return output\n\n\n\nclass Package:\n\n    def __init__(self, company, source, destination):\n\n        cost = getCost(source, destination)\n\n        self.company = company\n        self.source = source\n        self.destination = destination\n        self.cost = cost\n\n    def __str__(self):\n\n        output = str(self.source) + \" => \" + str(self.destination) + \", Cost = $\" + str(self.cost)\n\n        return output\n\n    def __gt__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost > other.cost:\n            return True\n        else:\n            return False\n\n    def __lt__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost < other.cost:\n            return True\n        else:\n            return False\n\n    def __eq__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost == other.cost:\n            return True\n        else:\n            return False\n\n    def __ne__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost != other.cost:\n            return True\n        else:\n            return False\n\n    def __ge__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost >= other.cost:\n            return True\n        else:\n            return False\n\n    def __le__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Type not of Package\")\n\n        if self.cost <= other.cost:\n            return True\n        else:\n            return False\n\n    def __add__(self, other):\n        if isinstance(other, Package) == False:\n            raise TypeError(\"Wrong type\")\n        if self.company != other.company:\n            raise ValueError(\"Not of the same company\")\n\n    def __radd__(self, other):\n        return self.__add__(other)\n\ndef extract(input):\n    companyPattern = r'\"([a-zA-Z_ ]+)\"'\n    companyName = re.findall(companyPattern, input)\n    companyName = companyName[0]\n\n    coordPattern = r'([0-9]{5})'\n    coords = re.findall(coordPattern, input)\n\n    source = coords[0]\n    dest = coords[1]\n\n\n\n    info = []\n    info.append(companyName)\n    info.append(source)\n    info.append(dest)\n\n    return info\n\n\n\n\ndef loadPackages():\n    filePath = os.path.join(DataPath, \"packages.dat\")\n    with open(filePath, 'r') as FILE:\n        dataFile = FILE.readlines()\n    #print(dataFile)\n    data = []\n    count = 0\n    for line in dataFile:\n        if (count > 0):\n            temp = extract(line)\n            data.append(temp)\n        count += 1\n\n\n    packageList = []\n\n    for var in data:\n        obj = Package(var[0], var[1], var[2])\n        packageList.append(obj)\n\n    return packageList\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mgulia/Python-and-Bash-Scripting","sub_path":"Lab12/Lab12Module.py","file_name":"Lab12Module.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"25794843659","text":"import random, os\n\nimport numpy as np\nimport torch\nfrom transformers import BertConfig\n\n\ndef same_seeds(seed):\n    torch.manual_seed(seed)\n    if torch.cuda.is_available():\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n    np.random.seed(seed)\n    random.seed(seed)\n    torch.backends.cudnn.benchmark = False\n    torch.backends.cudnn.deterministic = True\n\ndef save_model(model, epoch, optimizer, scheduler, args):\n    file_name = f\"{args.prefix}_{args.task}_{epoch}.ckpt\"\n    torch.save({\n        \"model\": model.state_dict(),\n        \"optimizer\": optimizer.state_dict(),\n        \"scheduler\": scheduler.state_dict(),\n        \"epoch\": epoch,\n        \"name\": args.model_name,\n    }, os.path.join(args.ckpt_dir, file_name))\n\ndef get_config():\n    config = BertConfig(\n        hidden_size=256,\n        num_hidden_layers=4,\n        num_attention_heads=2,\n        intermediate_size=512,\n        classifier_dropout=0.3,\n        pooler_fc_size=256,\n        pooler_num_attention_heads=2,\n        return_dict=False,\n    )\n    return config","repo_name":"TousakaNagio/temp_hw2","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"26974537775","text":"'''\nSpectral subtraction, basic version\n'''\n\nimport numpy as np\nimport librosa\nimport math\nfrom scipy.io import wavfile\n\n\nmixture_file = \"./mixture-steadyNoise51874.wav\"\noutput_file = \"estimate\"+mixture_file.split(\"mixture\")[-1]\n# audio_data: the audio samples; sr: the sampling rate\naudio_data, sr = librosa.load(mixture_file, sr=16000)\n\n\nstft_audio = librosa.stft(audio_data, n_fft=512) # short-time Fourier transform of the audio signal\n# n_fft is configurable; stft_audio has shape (1+n_fft/2, n_frames): shape[0] is the number of frequency bins, shape[1] the number of time frames\n# for reference see\n# http://librosa.org/doc/main/generated/librosa.stft.html\nmag_audio = np.abs(stft_audio) # magnitude spectrum\npha_audio = np.angle(stft_audio) # phase spectrum\n\n# noise magnitude estimate: assume the first 5 frames are silence (noise); other noise estimators could be used\nnoise_mean = np.zeros((mag_audio.shape[0],))\nfor i in range(0, 5):\n    noise_mean += mag_audio[:,i]\nnoise_mean /= 5 # take the average\n\n\nfor i in range(mag_audio.shape[1]):\n\n    mag_audio[:,i] = mag_audio[:,i] - noise_mean\n\nmag_audio_ = np.where(mag_audio > 0, mag_audio, 0) # keep values above 0 unchanged, clamp negatives to 0\n\nstft_audio_ = mag_audio_ * np.exp(1.0j*pha_audio) # reuse the original phase for the inverse transform\n\nwav_data = librosa.istft(stft_audio_)\n\nwavfile.write(output_file, sr, (wav_data * 32768).astype(np.int16))\n","repo_name":"TongtongSong/speech_experiments","sub_path":"experiments/03-speech-enhancement/Substraction/substraction.py","file_name":"substraction.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
{"seq_id":"71491482322","text":"import webapp2\nimport jinja2\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.api.images import get_serving_url\nimport os\n\nfrom model import User\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True\n)\n\nclass Profile(webapp2.RequestHandler):\n    #get method called by webapp2 in response to HTTP get request\n    def get(self):\n        self.response.headers['Content-Type'] = 
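A common refinement of the plain subtraction above is over-subtraction with a spectral floor (Berouti-style): scale the noise estimate up, and instead of clamping negatives to zero keep a small fraction of the original magnitude, which softens the "musical noise" left by hard rectification. A sketch with illustrative, untuned constants:

import numpy as np

def spectral_subtract(mag, noise_mag, alpha=4.0, beta=0.01):
    # mag: (bins, frames) magnitude spectrogram; noise_mag: (bins,) estimate.
    clean = mag - alpha * noise_mag[:, None]  # over-subtract (alpha > 1)
    return np.maximum(clean, beta * mag)      # spectral floor instead of 0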
'text/html'\n\n #gets current user\n user = users.get_current_user()\n\n # gets user key\n k_str = self.request.get('key')\n key = ndb.Key(urlsafe=k_str)\n\n muser = key.get()\n\n #gets posts\n posts = ndb.get_multi(muser.posts)\n\n # calculates stats\n nposts = len(muser.posts)\n nfollowers = len(muser.followers)\n nfollowing = len(muser.following)\n\n myuser = ndb.Key('User', user.user_id()).get()\n\n following = False\n\n for i in myuser.following:\n if i == key:\n following = True\n\n #assign template values to be rendered to the html page\n template_values = {\n 'user' : user,\n 'posts' : posts,\n 'nposts' : nposts,\n 'nfollowers' : nfollowers,\n 'nfollowing' : nfollowing,\n 'get_serving_url' : get_serving_url,\n 'myuser' : myuser,\n 'muser' : muser,\n 'following' : following\n }\n\n template = JINJA_ENVIRONMENT.get_template('profile.html')\n self.response.write(template.render(template_values))\n","repo_name":"bigjayray/instagramreplica","sub_path":"profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18781569812","text":"import base64\r\nimport hashlib\r\nimport hmac\r\nimport time\r\nfrom urllib2 import quote as url_quote\r\nimport socket\r\nimport sys\r\n\r\n\"\"\"parameters from the command line\"\"\"\r\nservice_bus_namespace=sys.argv[1]\r\nhub_name=sys.argv[2]\r\ndevice_name=sys.argv[3]\r\nsas_key_name=sys.argv[4]\r\nsas_key_value=sys.argv[5]\r\nprotocol=sys.argv[6] # AMQPS or HTTPS\r\n\r\n\"\"\"default values that could also be parameters\"\"\"\r\napi_version=\"2014-01\"\r\nexpiration_in_seconds=3600\r\n\r\n\"\"\"\r\nthe following methods were copied from the azure sdk for python\r\nand simplified by assuming Python 2.7 is used\r\n\"\"\"\r\ndef _decode_base64_to_bytes(data):\r\n if isinstance(data, unicode):\r\n data = data.encode('utf-8')\r\n return base64.b64decode(data)\r\n\r\ndef _encode_base64(data):\r\n if isinstance(data, unicode):\r\n data = data.encode('utf-8')\r\n encoded = base64.b64encode(data)\r\n return encoded.decode('utf-8')\r\n\r\ndef _sign_string(key, string_to_sign, key_is_base64=True):\r\n if key_is_base64:\r\n key = _decode_base64_to_bytes(key)\r\n else:\r\n if isinstance(key, unicode):\r\n key = key.encode('utf-8')\r\n if isinstance(string_to_sign, unicode):\r\n string_to_sign = string_to_sign.encode('utf-8')\r\n signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)\r\n digest = signed_hmac_sha256.digest()\r\n encoded_digest = _encode_base64(digest)\r\n return encoded_digest\r\n\r\n\"\"\"main code\"\"\"\r\n\r\nif protocol == \"HTTPS\":\r\n device_path = \"https://{}.servicebus.windows.net/{}/publishers/{}/messages?api-version={}\" \\\r\n .format(service_bus_namespace, \r\n hub_name, \r\n device_name,\r\n api_version)\r\nelif protocol == \"AMQPS\":\r\n device_path = \"sb://{}.servicebus.windows.net/{}/publishers/{}\" \\\r\n .format(service_bus_namespace, \r\n hub_name, \r\n device_name)\r\nelse:\r\n print(\"unknown protocol: '{}'\".format(protocol))\r\n sys.exit()\r\n\r\nuri = url_quote(device_path, '').lower()\r\n\r\n\"\"\"Returns the UTC datetime, in seconds since Epoch, when this signed \r\nrequest expires ({expiration_in_seconds} seconds from now).\"\"\"\r\nexpiry = int(round(time.time() + expiration_in_seconds))\r\n\r\nto_sign = \"{}{}{}\".format(uri, '\\n', expiry)\r\n\r\nsignature = url_quote(_sign_string(sas_key_value, to_sign, False), '')\r\n\r\n#requests will have to be signed by adding a header with 
a key of Authorization \r\n#and a value of:\r\nif protocol == \"HTTPS\":\r\n print(\"SharedAccessSignature sig={}&se={}&skn={}&sr={}\"\r\n .format(signature, expiry, sas_key_name, uri))\r\nelif protocol == \"AMQPS\":\r\n print(\"SharedAccessSignature:sig={}&se={}&skn={}&sr={}\"\r\n .format(signature, expiry, sas_key_name, uri))\r\n","repo_name":"DXFrance/BlueFrogHackfest","sub_path":"MessageCollection/EventHub/EventHubSamplePythonCode/getsastokenfordevice.py","file_name":"getsastokenfordevice.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"39686592934","text":"import re\nimport sys\nimport typing as t\nfrom argparse import ArgumentTypeError\nfrom gettext import gettext\nfrom typing import List\n\nfrom craft_cli import emit\n\nfrom juju_spell.exceptions import Abort, JujuSpellError\nfrom juju_spell.filter import FILTER_EXPRESSION_REGEX\n\nvisible_prompt_func: t.Callable[[str], str] = input\n\n\ndef _get_value_from_prompt(prompt) -> str:\n \"\"\"Get value from prompt.\"\"\"\n try:\n with emit.pause():\n return visible_prompt_func(prompt).strip()\n except (KeyboardInterrupt, EOFError):\n raise Abort(\"Aborted by user\") from None\n\n\ndef confirm(\n text: str,\n default: bool = True,\n abort: bool = False,\n prompt_suffix: str = \": \",\n) -> bool:\n \"\"\"Prompts for confirmation (yes/no question).\n\n If the user aborts the input by sending an interrupt signal this\n function will catch it and raise a :exc:`Abort` exception.\n\n If stdin is not a tty, the :exc:`JujuSpellError` exception will be raised.\n\n If user returns an empty answer, the default value is returned.\n returns default value.\n\n :param text: the question to ask.\n :param default: default answer\n :param abort: if this is set to `True` a negative answer aborts the\n exception by raising :exc:`Abort`.\n :param prompt_suffix: a suffix that should be added to the prompt.\n \"\"\"\n if not sys.stdin.isatty():\n raise JujuSpellError(\n \"Could not confirm without terminal session. 
Please use `--no-confirm` or\"\n \"run in virtual terminal session.\"\n )\n\n choices: str = \"Y/n\" if default else \"N/y\"\n prompt = f\"{text}[{choices}]{prompt_suffix}\"\n\n while True:\n value = _get_value_from_prompt(prompt).lower()\n\n if not value:\n return default\n elif value in (\"y\", \"yes\"):\n return True\n elif value in (\"n\", \"no\") and abort:\n raise Abort(\"Aborted by user\")\n elif value in (\"n\", \"no\"):\n return False\n\n emit.message(gettext(\"Error: invalid input\"))\n\n\ndef parse_comma_separated_str(comma_separated_str: str) -> List[str]:\n \"\"\"Parse comma separated string.\"\"\"\n result = comma_separated_str.split(\",\")\n return [obj.strip() for obj in result if obj]\n\n\ndef parse_filter(value: str) -> str:\n \"\"\"Type check for argument filter.\"\"\"\n if not (re.findall(FILTER_EXPRESSION_REGEX, value) or len(value) == 0):\n raise ArgumentTypeError(f\"Argument filter format wrong: {value}\")\n\n return value\n","repo_name":"rgildein/juju-spell","sub_path":"juju_spell/cli/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73898859282","text":"# coding: utf-8\n\nimport time\nimport hashlib\n\nimport redis\nfrom redis.exceptions import RedisError\n\nLUA_SCRIPT = '''\nlocal effects = {}\nfor idx, key in ipairs(KEYS) do\n local idxBase = (idx - 1) * 5\n local interval = tonumber(ARGV[idxBase + 1])\n local capacity = tonumber(ARGV[idxBase + 2])\n local nTokens = tonumber(ARGV[idxBase + 3])\n local timeNow = tonumber(ARGV[idxBase + 4])\n local expire = tonumber(ARGV[idxBase + 5])\n local currentTokens = -1\n local lastFillAt = timeNow\n\n if redis.call('exists', key) == 0 then\n currentTokens = capacity\n redis.call('hset', key, 'lastFillAt', timeNow)\n else\n lastFillAt = tonumber(redis.call('hget', key, 'lastFillAt'))\n if timeNow - lastFillAt > interval then\n currentTokens = capacity\n redis.call('hset', key, 'lastFillAt', timeNow)\n else\n currentTokens = tonumber(redis.call('hget', key, 'tokens'))\n if currentTokens > capacity then\n currentTokens = capacity\n end\n end\n end\n\n assert(currentTokens >= 0)\n\n if expire > 0 then\n redis.call('expire', key, expire)\n end\n\n if nTokens > currentTokens then\n redis.call('hset', key, 'tokens', currentTokens)\n for i, effect in ipairs(effects) do\n redis.call('hset', effect[1], 'tokens', effect[2])\n end\n return {key, interval, capacity, currentTokens, lastFillAt}\n else\n table.insert(effects, {key, currentTokens, nTokens})\n end\nend\n\nfor i, effect in ipairs(effects) do\n redis.call('hset', effect[1], 'tokens', effect[2] - effect[3])\nend\n\nreturn {'', 0, 0, 0, 0}\n'''\nLUA_SCRIPT_SHA1 = hashlib.sha1(LUA_SCRIPT).hexdigest()\n\n\ndef now_ms():\n return int(time.time() * 1000)\n\n\nclass RedisConsumeDenied(object):\n def __init__(self, redis_rv):\n self.redis_key = redis_rv[0]\n self.interval = redis_rv[1] / 1000\n self.capacity = redis_rv[2]\n self.current_tokens = redis_rv[3]\n self.last_fill_at = redis_rv[4]\n\n def __repr__(self):\n return ''.format(\n self.redis_key, self.interval, self.capacity, self.current_tokens,\n )\n\n\nclass RedisLimiter(object):\n\n def __init__(self,\n redis_cli=None,\n host='localhost', port=6379, db=0,\n key_prefix='limiter'):\n if not redis_cli:\n redis_cli = redis.StrictRedis(host=host, port=port, db=db)\n self.redis_cli = redis_cli\n self.key_prefix = key_prefix\n\n def get_redis_key(self, key, interval):\n return 
'{}:{}:{}'.format(self.key_prefix, key, interval)\n\n def get_token_count(self, key, interval):\n redis_key = self.get_redis_key(key, interval)\n return self.redis_cli.hget(redis_key, 'tokens')\n\n def consume(self, args):\n script_keys = []\n script_args = []\n the_now_ms = now_ms()\n for (key, interval, capacity, n) in args:\n redis_key = self.get_redis_key(key, interval)\n expire = interval * 2 + 15\n interval_ms = interval * 1000\n script_keys.append(redis_key)\n script_args.extend([interval_ms, capacity, n, the_now_ms, expire])\n\n for i in range(3):\n try:\n rv = self.redis_cli.evalsha(\n LUA_SCRIPT_SHA1, len(script_keys), *(script_keys + script_args)\n )\n if rv == ['', 0, 0, 0, 0]:\n return True, None\n else:\n return False, RedisConsumeDenied(rv)\n except RedisError:\n sha1 = self.redis_cli.script_load(LUA_SCRIPT)\n assert sha1 == LUA_SCRIPT_SHA1\n\n def consume_one(self, key, interval, capacity, n=1):\n return self.consume([(key, interval, capacity, n)])\n","repo_name":"TheWaWaR/bucket-limiter-py","sub_path":"bucket_limiter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3762,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"9920454256","text":"\"\"\"\nOriginal file:\nhttps://github.com/nlplab/brat/blob/master/tools/conllXtostandoff.py\n\nLicence:\nhttps://github.com/nlplab/brat/blob/master/LICENSE.md\n\n\"\"\"\n\n\n# Script to convert a CoNLL X (2006) tabbed dependency tree format\n# file into BioNLP ST-flavored standoff and a reconstruction of the\n# original text.\n\n\nimport codecs\nimport os\nimport io\nimport re\nimport sys\n\n# maximum number of sentences to include in single output document\n# (if None, doesn't split into documents)\nMAX_DOC_SENTENCES = 10\n\n# whether to output an explicit root note\nOUTPUT_ROOT = True\n# the string to use to represent the root node\nROOT_STR = 'ROOT'\n\nINPUT_ENCODING = \"UTF-8\"\nOUTPUT_ENCODING = \"UTF-8\"\n\noutput_directory = None\n\n# rewrites for characters appearing in CoNLL-X types that cannot be\n# directly used in identifiers in brat-flavored standoff\ncharmap = {\n '<': '_lt_',\n '>': '_gt_',\n '+': '_plus_',\n '?': '_question_',\n '&': '_amp_',\n ':': '_colon_',\n '.': '_period_',\n '!': '_exclamation_',\n}\n\n\ndef maptype(s):\n return \"\".join([charmap.get(c, c) for c in s])\n\n\ndef tokstr(start, end, ttype, idnum, text):\n # sanity checks\n assert '\\n' not in text, \"ERROR: newline in entity '%s'\" % (text)\n assert text == text.strip(), \"ERROR: tagged span contains extra whitespace: '%s'\" % (text)\n return \"T%d\\t%s %d %d\\t%s\" % (idnum, maptype(ttype), start, end, text)\n\n\ndef depstr(depid, headid, rel, idnum):\n return \"R%d\\t%s Arg1:T%d Arg2:T%d\" % (idnum, maptype(rel), headid, depid)\n\n\ndef output(docnum, sentences):\n global output_directory\n offset, idnum, ridnum = 0, 1, 1\n doctext = \"\"\n final_entities = []\n final_deps = []\n for si, sentence in enumerate(sentences):\n tokens, deps = sentence\n\n # store mapping from per-sentence token sequence IDs to\n # document-unique token IDs\n idmap = {}\n\n # output tokens\n prev_form = None\n\n if OUTPUT_ROOT:\n # add an explicit root node with seq ID 0 (zero)\n tokens = [('0', ROOT_STR, ROOT_STR)] + tokens\n\n for ID, form, POS in tokens:\n if \"-\" in ID:\n continue\n if prev_form is not None:\n doctext = doctext + ' '\n offset += 1\n\n # output a token annotation\n ent = ['T{}'.format(idnum), maptype(\n POS), offset, offset + len(form), form]\n 
final_entities.append(ent)\n            # print(tokstr(\n            #     offset, offset + len(form), POS, idnum, form))\n            assert ID not in idmap, "Error in data: dup ID"\n            idmap[ID] = idnum\n            idnum += 1\n\n            doctext = doctext + form\n            offset += len(form)\n\n            prev_form = form\n\n        # output dependencies\n        for dep, head, rel in deps:\n\n            # if root is not added, skip deps to the root (idx 0)\n            if not OUTPUT_ROOT and head == '0':\n                continue\n            dep_list = ['R{}'.format(ridnum), maptype(\n                rel), [['Arg1', 'T{}'.format(idmap[head])], ['Arg2', 'T{}'.format(idmap[dep])]]]\n            final_deps.append(dep_list)\n            #print(depstr(idmap[dep], idmap[head], rel, ridnum))\n            ridnum += 1\n\n        if si + 1 != len(sentences):\n            doctext = doctext + '\\n'\n            offset += 1\n\n    return [final_entities, final_deps]\n\n\ndef process(text):\n    docnum = 1\n    sentences = []\n    tokens, deps = [], []\n\n    lines = io.StringIO(text).readlines()\n    for ln, l in enumerate(lines):\n        l = l.strip()\n\n        # ignore lines starting with "#" as comments\n        if len(l) > 0 and l[0] == "#":\n            continue\n\n        if re.match(r'^\\s*$', l):\n            # blank lines separate sentences\n            if len(tokens) > 0:\n                sentences.append((tokens, deps))\n                tokens, deps = [], []\n\n                # limit sentences per output "document"\n                if MAX_DOC_SENTENCES and len(sentences) >= MAX_DOC_SENTENCES:\n                    output(docnum, sentences)\n                    sentences = []\n                    docnum += 1\n            continue\n\n        # Assume it's a normal line. The format is tab-separated,\n        # with ten fields, of which the following are used here\n        # (from http://ilk.uvt.nl/conll/):\n        # 1 ID    Token counter, starting at 1 for each new sentence.\n        # 2 FORM  Word form or punctuation symbol.\n        # 5 POSTAG Fine-grained part-of-speech tag\n        # 7 HEAD  Head of the current token\n        # 8 DEPREL Dependency relation to the HEAD.\n        fields = l.split('\\t')\n\n        assert len(fields) == 10\n\n        ID, form, POS = fields[0], fields[1], fields[4]\n        head, rel = fields[6], fields[7]\n\n        tokens.append((ID, form, POS))\n        # allow value "_" for HEAD to indicate no dependency\n        if head != "_":\n            deps.append((ID, head, rel))\n\n    # process leftovers, if any\n    if len(tokens) > 0:\n        sentences.append((tokens, deps))\n    if len(sentences) > 0:\n        return output(docnum, sentences)\n    else:\n        return [[], []]\n\n\n# dbg:\nif __name__ == '__main__':\n    import sys\n    fname = sys.argv[1]\n    with open(fname, "r") as f:\n        text = f.read()\n    print(process(text))\n","repo_name":"BOUN-TABILab-TULAP/tabi-rop","sub_path":"backend/src/backend/backend_proxy/misc/conllXtostandoff.py","file_name":"conllXtostandoff.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
{"seq_id":"19186412195","text":"# -*- coding:utf-8 -*-\n\n# ---------------------------------------------\n# @file Requests.py\n# @description RequestsCookie\n# @author WcJun\n# @date 2020/07/05\n# ---------------------------------------------\n\n\nimport src.main.python.spider.SpiderUtils as SpiderUtils\nimport requests\n\n\ndef main():\n    http_headers = SpiderUtils.populate_headers()\n\n    hello_url = "http://192.168.0.5:939/hello/greet"\n    cookie_url = "http://192.168.0.5:939/hello/cookie"\n\n    session = requests.Session()\n    hello_response = session.get(hello_url, verify=False, headers=http_headers)\n    print("the hello response is:", hello_response.text)\n    \n    cookie_response = session.get(cookie_url, verify=False, headers=http_headers)\n    print("the user response is:", 
cookie_response.text)\n","repo_name":"photowey/python-study","sub_path":"src/main/python/spider/RequestsCookie.py","file_name":"RequestsCookie.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71888403601","text":"\"\"\"# Workers\"\"\"\n\nfrom flask import current_app, render_template, request\nfrom sqlalchemy import Boolean, Column, String\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy_modelid import ModelIdBase\nfrom sqlalchemy_mutable import MutableType\n\n\ndef enqueue(enqueue_method):\n # wraps the worker's enqueueing methods\n def enqueue_wrapper(worker, *args, **kwargs):\n if inspect(worker).identity is None:\n # ensure the worker has an id\n session = worker.manager.db.session\n session.add(worker)\n session.commit()\n if not worker.job_in_progress:\n # avoid repeat enqueuing\n job = enqueue_method(worker, *args, **kwargs)\n worker.job_finished, worker.job_in_progress = False, True\n worker.job_id = job.get_id()\n worker.manager.db.session.commit()\n # return the loading page HTML\n return render_template(worker.template, worker=worker)\n\n return enqueue_wrapper\n\n\nclass WorkerMixin(ModelIdBase):\n \"\"\"\n The worker executes a complex task using a Redis queue. When called, it \n enqueues a job and returns a loading page.\n\n When a Redis worker grabs the enqueued job, it executes the worker's \n function, `func`, passing in the worker's `args` and `kwargs`. After \n execution, the worker's script replaces the client's window location with \n a call to its `callback` view function.\n\n Parameters\n ----------\n callback : str or None, default=None\n Name of the view function to which the client will navigate once the \n worker has finished its job. If `None`, the current view function is \n re-called.\n\n template : str or None, default=None\n Name of the html template file for the worker's loading page. If \n `None`, the worker will use the manager's loading page template.\n\n loading_img_src : str or None, default=None\n Source of the loading image. 
If `None` the worker will use the \n manager's loading image.\n\n Attributes\n ----------\n callback : str\n Set from the `callback` parameter.\n\n template : str\n Set from the `template` parameter.\n\n loading_img_src : str\n Set from the `loading_img_src` parameter.\n\n manager : flask_worker.Manager\n The worker's manager.\n\n job_finished : bool, default=False\n Indicates that the worker has finished its job.\n\n job_in_progress : bool, default=False\n Indicates that the worker has a job in progress.\n\n job_id : str\n Identifier for the worker's job.\n \"\"\"\n _callback = Column(String)\n job_finished = Column(Boolean, default=False)\n job_in_progress = Column(Boolean, default=False)\n job_id = Column(String)\n template = Column(String)\n loading_img_src = Column(String)\n\n @property\n def callback(self):\n if self._callback:\n return self._callback\n try:\n # if operating in request context\n return request.url\n except:\n # operating outside request context\n return self._callback\n\n @callback.setter\n def callback(self, val):\n self._callback = val\n\n @property\n def manager(self):\n return current_app.extensions['manager']\n\n def __init__(self, callback=None, template=None, loading_img_src=None):\n self.callback = callback\n self.template = template or self.manager.template\n self.loading_img_src = loading_img_src or self.manager.loading_img_src\n self.reset()\n super().__init__()\n\n def reset(self):\n \"\"\"\n Resets the `job_finished`, `job_in_progress`, and `job_id` attributes.\n\n Returns\n -------\n self :\n \"\"\"\n self.job_finished, self.job_in_progress = False, False\n self.job_id = None\n return self\n\n @enqueue\n def enqueue_method(self, model, method_name, *args, **kwargs):\n \"\"\"\n Enqueue a database model's method for execution.\n\n Parameters\n ----------\n model : db.Model\n Model whose method will be enqueued.\n\n method_name : str\n Name of the model's method to enqueue.\n\n \\*args, \\*\\*kwargs :\n Arguments and keyword arguments passed to the method.\n\n Returns\n -------\n loading_page : str (html)\n The client's loading page.\n \"\"\"\n return current_app.task_queue.enqueue(\n 'flask_worker.tasks.execute_method',\n kwargs=dict(\n app_import=self.manager.app_import,\n worker_cls=self.__class__, \n worker_id=inspect(self).identity[0],\n model_cls=type(model),\n model_id=inspect(model).identity[0],\n method_name=method_name, args=args, kwargs=kwargs\n )\n )\n \n @enqueue\n def enqueue_function(self, func, *args, **kwargs):\n \"\"\"\n Enqueue the a function for execution.\n\n Parameters\n ----------\n func : callable\n Function which will be enqueued.\n\n \\*args, \\*\\*kwargs :\n Arguments and keyword arguments passed to the function.\n\n Returns\n -------\n loading_page : str (html)\n The client's loading page.\n \"\"\"\n return current_app.task_queue.enqueue(\n 'flask_worker.tasks.execute_func',\n kwargs=dict(\n app_import=self.manager.app_import,\n worker_cls=type(self), \n worker_id=inspect(self).identity[0],\n func=func, args=args, kwargs=kwargs\n )\n )","repo_name":"dsbowen/flask-worker","sub_path":"build/lib/flask_worker/worker_mixin.py","file_name":"worker_mixin.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71351496722","text":"\"\"\"setup.py file for package Pyrunc\"\"\"\n\nimport setuptools\n\nfrom utility import get_and_update_version\n\n_version = get_and_update_version()\n\nwith open(\"README.md\", \"r\") as fh:\n long_description 
= fh.read()\n\nsetuptools.setup(\n    name=\"Pyrunc\",\n    version=\"0.0.\" + _version,\n    author=\"Kartikei Mittal\",\n    author_email=\"kartikeimittal@gmail.com\",\n    description=\"Simple python package to write C code directly in python script\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/Kartikei-12/Pyrunc\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: GPLv3\",\n        \"Operating System :: OS Independent\",\n    ],\n)\n","repo_name":"Kartikei-12/Pyrunc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
{"seq_id":"23593786129","text":"from abc import ABC\nimport exceptions\n\nclass Vehicle(ABC):\n    def __init__(self, started, weight=0, fuel=0, fuel_consumption=0):\n        self.started = started\n        self.weight = weight\n        self.fuel = fuel\n        self.fuel_consumption = fuel_consumption\n\n    def start(self):\n        if self.started is not True:\n            if self.fuel > 0:\n                self.started = True\n            else:\n                raise exceptions.LowFuelError\n\n    def move(self, dist_km):\n        fuel_to_move = dist_km * self.fuel_consumption\n        if self.fuel >= fuel_to_move:\n            self.fuel = self.fuel - fuel_to_move\n\n        else:\n            raise exceptions.NotEnoughFuel","repo_name":"OtusTeam/Machine-Learning","sub_path":"Homeworks/homework_02/Vehicle.py","file_name":"Vehicle.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"74953851600","text":"INF = int(1e9)\r\n\r\nn,m = map(int, input().split())\r\ngraph = [[INF] * (n+1) for _ in range(n+1)]\r\n\r\n# going from a node to itself costs 0\r\nfor a in range(1, n+1):\r\n    for b in range(1, n+1):\r\n        if a == b:\r\n            graph[a][b] = 0\r\n\r\n# read each edge and initialise the matrix with it\r\nfor _ in range(m):\r\n    # the cost between A and B is 1 in both directions\r\n    a,b = map(int,input().split())\r\n    graph[a][b] = 1\r\n    graph[b][a] = 1\r\n\r\n# read the intermediate node X and the final destination node K\r\nx,k = map(int, input().split())\r\n\r\n# run the Floyd-Warshall algorithm following the recurrence\r\n# (use a separate loop variable so the destination k is not overwritten)\r\nfor via in range(1, n+1):\r\n    for a in range(1,n+1):\r\n        for b in range(1, n+1):\r\n            graph[a][b] = min(graph[a][b], graph[a][via]+graph[via][b])\r\n\r\n# print the result\r\ndistance = graph[1][k] + graph[k][x]\r\n\r\n# print -1 if the destination cannot be reached\r\nif distance >= INF:\r\n    print(\"-1\")\r\nelse:\r\n    print(distance)\r\n    \r\n\r\n","repo_name":"YoungJin00/Python_practice","sub_path":"미래도시.py","file_name":"미래도시.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"5668543497","text":"import os\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport rx\nfrom dotenv import load_dotenv\nfrom slack_sdk.socket_mode import SocketModeClient\nfrom slack_sdk.web import WebClient\nfrom slack_sdk.socket_mode.response import SocketModeResponse\nfrom slack_sdk.socket_mode.request import SocketModeRequest\nfrom rx import operators as ops\nimport rx.subject\nfrom models.messages import RequestMessage, ResponseMessage, ResponseMessageType\n\n\nclass Slackbot:\n    client: SocketModeClient\n    # Subject to handle all messages\n    slack_response_subject: rx.subject.Subject\n\n    def __init__(self):\n        # load env\n        env_path = Path('.') / '.env'\n        load_dotenv(dotenv_path=env_path)\n\n        # client setup\n        self.client = SocketModeClient(\n            app_token=os.environ['APP_TOKEN'],\n            web_client=WebClient(os.environ['WEB_CLIENT_TOKEN'])\n        )\n\n        # Subject 
to handle all messages\n self.slack_response_subject = rx.subject.Subject()\n\n self.client.socket_mode_request_listeners.append(self.__process)\n self.client.connect()\n\n # Process events\n def __process(self, client: SocketModeClient, req: SocketModeRequest):\n message = namedtuple('message', ['client', 'request'])\n self.slack_response_subject.on_next(message(client, req))\n\n def get_stream(self):\n def acknowledge_message(msg):\n response = SocketModeResponse(envelope_id=msg.request.envelope_id)\n self.client.send_socket_mode_response(response)\n\n return self.slack_response_subject.pipe(\n ops.filter(lambda msg:\n msg.request.type == 'events_api'),\n ops.do_action(lambda msg: acknowledge_message(msg)),\n ops.filter(\n lambda msg: msg.request.payload[\"event\"][\"type\"] == \"message\" and msg.request.payload[\"event\"].get(\n \"subtype\") is None),\n ops.map(\n lambda msg: RequestMessage(msg.request.payload[\"event\"][\"user\"], msg.request.payload[\"event\"][\"text\"],\n msg.request.payload[\"event\"][\"channel\"], event=msg.request.payload[\"event\"]))\n )\n\n def send_message(self, response: ResponseMessage):\n if response.get_message_type() == ResponseMessageType.REACTION:\n self.client.web_client.reactions_add(\n name=\"eyes\",\n channel=response.get_channel(),\n timestamp=response.get_event()[\"ts\"]\n )\n elif response.get_message_type() == ResponseMessageType.MESSAGE:\n self.client.web_client.chat_postMessage(\n channel=response.get_channel(),\n text=response.get_text(),\n attachments=response.get_attachments()\n )\n","repo_name":"emmanuelrobles/slackbot","sub_path":"bot_init/slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32459633551","text":"#!/usr/bin/env python\n\n###########################################################################\n# learn.py: Machine learning algorithms for TensorFlow skills test\n# Author: Chris Hodapp (hodapp87@gmail.com)\n# Date: 2017-11-28\n###########################################################################\n\nimport data_preprocessing\nimport graph_construction\n\nimport keras\nfrom keras.optimizers import SGD\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass BatchHistory(keras.callbacks.Callback):\n \"\"\"Keras callback for recording validation accuracy at regular\n intervals, i.e. every set number of batches. This should be\n passed to a 'fit' call via the list passed to its 'callbacks'\n keyword argument, and the model must be compiled to have\n 'accuracy' as one of its metrics.\n\n After this, the 'history' property will contain a list of\n dictionaries. 
Each dictionary will have the following keys and\n values in it:\n batch -- batch number for all loss & accuracy numbers\n val_loss -- validation loss\n val_acc -- validation accuracy\n loss -- training loss\n acc -- training accuracy\n \"\"\"\n def __init__(self, test_X, test_y, skip=100):\n \"\"\"Initialize a BatchHistory.\n\n Parameters:\n skip -- Interval (of batch number) at which to record accuracy\n test_X -- Input data for predicting accuracy\n test_y -- Correct labels corresponding to test_X\n \"\"\"\n self.skip = skip\n self.history = []\n # Below are required for evaluating:\n self.test_X = test_X\n self.test_y = test_y\n # 'i' tallies up the batch number:\n self.i = 0\n # These two lists store the past training accuracies and\n # losses for up to 'self.skip' batches, and they are\n # periodically used to provide an averaged training loss &\n # accuracy over several batches, and then cleared.\n self.losses = []\n self.accs = []\n def on_batch_end(self, batch, logs={}):\n self.i += 1\n self.accs.append(logs[\"acc\"])\n self.losses.append(logs[\"loss\"])\n if self.i % self.skip == 0:\n ev = self.model.evaluate(self.test_X, self.test_y, verbose=0)\n l = {}\n l[\"loss\"] = sum(self.losses) / len(self.losses)\n l[\"acc\"] = sum(self.accs) / len(self.accs)\n self.losses = []\n self.accs = []\n l[\"batch\"] = self.i\n l[\"val_loss\"] = ev[0]\n l[\"val_acc\"] = ev[1]\n self.history.append(l)\n\ndef train_model(num_epochs, batch_size, learning_rate):\n \"\"\"Trains a neural network for image classification from the SVHN\n dataset, and creates a plot giving training/testing accuracy as a\n function of batch number.\n\n Parameters:\n num_epochs -- Number of training epochs\n batch_size -- Number of examples in each training batch\n learning_rate -- Initial learning rate\n \"\"\"\n # Get data:\n train_X_orig, train_y_orig, _, _ = data_preprocessing.load_data()\n train_X_norm = data_preprocessing.normalize(train_X_orig)\n train_X, valid_X, train_y, valid_y = data_preprocessing.split(\n train_X_norm, train_y_orig)\n # One-hot encode so they can be used for input/validation:\n train_y_cat = keras.utils.to_categorical(train_y, num_classes=10)\n valid_y_cat = keras.utils.to_categorical(valid_y, num_classes=10)\n \n # Build & compile model:\n model = graph_construction.get_keras_model()\n sgd = SGD(lr=learning_rate, decay=1e-5, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy',\n optimizer=sgd,\n metrics=['accuracy'])\n\n # Train:\n history = BatchHistory(valid_X, valid_y_cat) \n model.fit(train_X,\n train_y_cat,\n epochs=num_epochs,\n batch_size=batch_size,\n callbacks=[history],\n validation_data=(valid_X, valid_y_cat))\n # The callback slows things down a bit, but I'm not sure of a good\n # way around it. 
If I were testing only on specific batches of\n    # validation data, it might be less of an issue.\n\n    fname_base = \"model_{0}_{1}_{2}_rgb\".format(learning_rate, num_epochs,\n                                                 batch_size)\n    model.save_weights(\"{0}.h5\".format(fname_base))\n\n    # Plot training & validation accuracy, and loss (not called for,\n    # but useful):\n    b = [i[\"batch\"] for i in history.history]\n    plt.plot(b, [i[\"acc\"] for i in history.history])\n    plt.plot(b, [i[\"val_acc\"] for i in history.history])\n    plt.ylabel('Accuracy')\n    plt.xlabel('Batch')\n    plt.legend(['Training', 'Validation'], loc='lower right')\n    plt.savefig(\"{0}_accuracy.png\".format(fname_base))\n    plt.show()\n    plt.close()\n    \n    plt.plot(b, [i[\"loss\"] for i in history.history])\n    plt.plot(b, [i[\"val_loss\"] for i in history.history])\n    plt.ylabel('Loss (categorical cross-entropy)')\n    plt.xlabel('Batch')\n    plt.legend(['Training', 'Validation'], loc='lower right')\n    plt.savefig(\"{0}_loss.png\".format(fname_base))\n    plt.close()\n\nif __name__ == '__main__':\n    train_model(10, 64, 0.01)\n","repo_name":"Hodapp87/yazabi-tensorflow-image-test","sub_path":"learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"15075320254","text":"#!/usr/bin/env python\n\n\n# this module allows you to take user input\nimport subprocess\n# this module parses user input\nimport optparse\n# this module allows for filtering using regex\nimport re\n#\n\n# \"input()\" is python 3 syntax\n# \"raw_input()\" is python 2 syntax\n\n\ndef get_arguments():\n    # set method from the optparse object to parser; allows for multiple arguments / values in cli.\n    parser = optparse.OptionParser()\n    parser.add_option(\"-i\", \"--interface\", dest=\"interface\", help=\"Interface to change its MAC address\")\n    parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\", help=\"New MAC address.\")\n    # captures values / inputs for use\n    (options, arguments) = parser.parse_args()\n\n    # make sure user gives values or throw an error\n    if not options.interface:\n        parser.error(\"[-] Please specify an interface, use --help for more info\")\n    elif not options.new_mac:\n        parser.error(\"[-] Please specify a new mac, use --help for more info.\")\n    # options holds the values of the user inputs\n    return options\n\n\ndef change_mac(interface, new_mac):\n    print(\"[*] Changing MAC address for \" + interface + \" to \" + new_mac)\n    subprocess.call([\"sudo\", \"ifconfig\", interface, \"down\"])\n    subprocess.call([\"sudo\", \"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n    subprocess.call([\"sudo\", \"ifconfig\", interface, \"up\"])\n    # this line is not from the demo\n    # print(\"All done! The new MAC address is: \" + new_mac)\n\n\ndef get_current_mac(interface):\n    ifconfig_result = subprocess.check_output([\"ifconfig\", interface])\n    # this is where we use the \"re\" module and regex to filter out the MAC address\n    mac_address_search_result = re.search(rb\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_result)\n    if mac_address_search_result:\n        return mac_address_search_result.group(0)\n    else:\n        print(\"That's weird... where the hell is the MAC Address? Double check the interface.\")\n\n\n\n# the options variable holds the returned values from the get_arguments() function above\noptions = get_arguments()\ncurrent_mac = get_current_mac(options.interface)\n# in the below print statement, str() is used because \"current_mac\" is not a string\n# and cannot be read. 
str() converts the variable to a string and clears the error.\nprint(\"The current MAC ADDY is ===> \" + str(current_mac) + \" <===\")\nchange_mac(options.interface, options.new_mac)\n# the value of this variable should have changed to the new mac address\ncurrent_mac = get_current_mac(options.interface)\nprint(current_mac)\nif current_mac and current_mac.decode() == options.new_mac:\n    print(\"[+] MAC address was successfully changed to: ===> \" + str(current_mac) + \" <===\")\nelse:\n    print(\"[+] MAC address did not change. :-(\")\n\n\n\n\n\n# this function does exactly what it says. it runs ifconfig and captures the state/value of the\n# selected hw component\n# subprocess.check_output([\"ifconfig\", options.interface])\n\n\n\n# print(ifconfig_result)\n# print(\"All done! The new MAC address is: \")\n\n# the return object will contain all of the strings that are found in groups. if there are\n# multiple matches, the first one is .group(0)\n# print(mac_address_search_result.group(0))","repo_name":"tmosley03/mac_spfr","sub_path":"mac_spoof/mac_spoof.py","file_name":"mac_spoof.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"7181415556","text":"import math\n\n\nclass MergeSort:\n\n    def sort(self, arrayToSort, low, high):\n        if(low < high):\n            mid = math.floor((low + high)/2)\n            self.sort(arrayToSort, low, mid)\n            self.sort(arrayToSort, mid+1, high)\n            self.merge(arrayToSort, low, mid, high)\n        return arrayToSort\n\n    def merge(self, arrayToSort, low, mid, high):\n        leftPart = arrayToSort[low: mid+1]\n        rightPart = arrayToSort[mid+1:high+1]\n        i = 0\n        j = 0\n        k = low\n        m = len(leftPart)\n        n = len(rightPart)\n        while((i < m) and (j < n)):\n            if(leftPart[i] < rightPart[j]):\n                arrayToSort[k] = leftPart[i]\n                k += 1\n                i += 1\n            else:\n                arrayToSort[k] = rightPart[j]\n                k += 1\n                j += 1\n        while(i < m):\n            arrayToSort[k] = leftPart[i]\n            k += 1\n            i += 1\n        while(j < n):\n            arrayToSort[k] = rightPart[j]\n            k += 1\n            j += 1\n        return arrayToSort\n\n\ninputArray = [8, 7, 6, 5, 4, 3, 2, 1]\nmergeSort = MergeSort()\nprint(\"::::::::: Merge Sort :::::::::\")\nprint(\"Before Sorting ::: \", inputArray)\nprint(\"After Sorting ::: \", mergeSort.sort(inputArray, 0, 7))\nprint(\"Time complexity ::: O(n log n)\")\nprint(\"Space complexity ::: O(n)\")\n","repo_name":"ka6thick54/learning","sub_path":"sorting/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"22479830304","text":"import json, os, sys\nsearch_id = 'malware--fde81448-ac55-4d27-87ee-1bc4501514e7'\n\nwith open('./just_rels.json') as infile:\n\tsomething = json.load(infile)\n#\tprint(something)\n#\tfor thing in something:\n#\t\tprint(thing)\n\tif search_id in something:\n\t\tprint(\"We found it!!!\")\n\telse:\n\t\tprint(\"Very sad, nonexistent\")\n","repo_name":"idaholab/cape2stix","sub_path":"clustering_scripts/find_sample.py","file_name":"find_sample.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"}
{"seq_id":"14184887052","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ntime_index_all = np.abs(np.load('time_index_all.npy'))\nspeed_right_all = np.load('speed_right_all.npy')\nspeed_left_all = np.load('speed_left_all.npy')\n\nplt.plot(time_index_all, speed_right_all, 'r.-', label='right')\nplt.plot(time_index_all, speed_left_all, 'b.-', 
label='left')\nplt.xlabel('PWM')\nplt.ylabel('speed')\nplt.legend()\nplt.show()\n\n# 344, 90\n# 506, 161\n\nk = (161 - 90) / (506 - 344)\nki = 1/k\nprint(\"ki =\", ki) # ki = 2.2816901408450705 PWM/speed","repo_name":"vedenev/buggy","sub_path":"raw_codes/tt010_plot_frequency_vs_pwm.py","file_name":"tt010_plot_frequency_vs_pwm.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"36561988464","text":"#Kristen Ching\nimport random\nclass User:\n    def __init__(self, firstName, lastName, avat=\"\"):\n        self.first = firstName\n        self.last = lastName\n        self.avatar = avat\n        self.ID = random.randint(0, 100000)\n\n    def setVals(self, firstName, lastName, avat):\n        self.first = firstName\n        self.last = lastName\n        self.avatar = avat\n        self.ID = random.randint(0, 100000)\n\n    def __str__(self):\n        return \"Customer Info... \\nFirst Name: \" + self.first + \\\n               \"\\nLast Name: \" + self.last + \\\n               \"\\navatar: \" + self.avatar + \\\n               \"\\nUser ID: \" + str(self.ID)\n\ndef main():\n    firstName = input(\"Please enter your first name. \")\n    lastName = input(\"Please enter your last name. \")\n    createAvat = input(\"Would you like to use a public avatar? Enter y/n \")\n    if createAvat == \"y\":\n        avat = input(\"Please enter a name for your public avatar. \")\n        user1 = User(firstName, lastName, avat)\n    else:\n        user1 = User(firstName, lastName)\n\n    print(user1)\n    \nmain()\n","repo_name":"KristenChing/Ching_Kristen","sub_path":"Python/Lab_11/Lab_11.2/EX_05_The_User_Class.py","file_name":"EX_05_The_User_Class.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"5224467458","text":"# imports\nimport sys\nsys.path.append('src')\n\nfrom config import DEFAULT_CHECKPOINT_PATH\nfrom config import DEFAULT_DATA_SET_PATH\nfrom data import DataSet\nfrom predicting import Predicter\n\nimport logging\n\nimport tensorflow as tf\n\n# setup\nlogging.basicConfig(level=logging.DEBUG)\n\n# variables\nflags = tf.app.flags\nflags.DEFINE_string('eval_text', None, 'Text to get output from.')\nflags.DEFINE_string('checkpoint_path', DEFAULT_CHECKPOINT_PATH,\n                    'Path to checkpoint, if no file is given it tries to get '\n                    'the latest checkpoint in that folder.')\nflags.DEFINE_string('data_set_path', DEFAULT_DATA_SET_PATH,\n                    'Path to dataset.')\nFLAGS = flags.FLAGS\n\n\n# functions\ndef main():\n    data_set = DataSet.load(FLAGS.data_set_path)\n    print(data_set.output_vocabulary.token_to_id)\n\n    logging.info('building predicter')\n    predicter = Predicter(data_set.output_vocabulary, FLAGS.checkpoint_path)\n\n    logging.info('getting output')\n    text = ' '.join(predicter.get_output(FLAGS.eval_text))\n    print(text)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"OliverEdholm/Tensorflow-Easy-Seq2Seq","sub_path":"evaluate_outputs.py","file_name":"evaluate_outputs.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"}
{"seq_id":"9494974342","text":"# %% [markdown]\n'''\n# Fitting geometric models with gradient descent\n'''\n# %% [markdown]\n'''\nThe process of fitting known geometric models to scattered data is well known and has been settled for a long time. 
\nIndeed, Legendre was one of the first to use least squares for such a task [Legendre, 1805],\nwhen he wanted to fit an equation for the shape of the Earth!\n\nThe idea is quite simple: knowing the model definition, it is possible to estimate the parameters\nthat best fit the noisy input data.\n'''\n# %% [markdown]\n'''\n## tl;dr\n1. Mathematical model definition for your data\n2. Cost function\n3. Optimisation (gradient descent with cost function derivative on all the parameters)\n4. Hyper-parameters optimisation with training set (learning rate)\n5. Model bias with testing set\n'''\n# %% [markdown]\n'''\n## 1. Optimisation\n'''\n# %% [markdown]\n\n# Given a mathematical model with parameters $\\Theta$, we can define its minimization function $\\xi(\\Theta)$ (the inverse likelihood).\n# This represents the fitness of the model given some data: if $\\xi(\\Theta)$ is minimal, then the parameters are best suited to the data.\n\n# To optimize the parameters, one could evaluate every possible parameter set $\\Theta_i$ and compute $\\xi(\\Theta_i)$ directly.\n# Unfortunately, with our current compute power, this is too time consuming.\n# This is especially true with more than one parameter, where the manifold becomes too large to explore.\n# Imagine the difference between exploring a one-dimensional line vs exploring a 3D surface!\n\n# (Figure: the cost manifold)\n\n# This is why it is important to have a dynamic strategy to find the local minimum, and this can be done using gradients.\n# We can calculate the gradient of the energy function with respect to our parameters:\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta \\Theta} = \\nabla\\xi(\\Theta)\n# \\end{equation}\n\n# The gradient of the energy function can then be used to update the parameters at every step, converging to a local minimum:\n# \\begin{equation}\n# \\Theta(t+1) = \\Theta(t) - \\mu\\nabla\\xi(\\Theta(t)) \n# \\end{equation}\n\n# One could instead compute the optimal solution directly by setting the gradient of the energy function to 0:\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta \\Theta} = 0\n# \\end{equation}\n\n# While this method works well with little data and few parameters, it can be computationally infeasible to compute the best solution this way [see this thread for more information](https://stats.stackexchange.com/questions/278755/why-use-gradient-descent-for-linear-regression-when-a-closed-form-math-solution).\n# Moreover, it is not always easy to extract the optimal parameters mathematically.\n
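\n# %%\n# A tiny numeric illustration (an addition, not from the original article): a few\n# runs of the update rule above on the 1-D function f(x) = x**2, whose gradient\n# is 2*x. With mu = 0.1, x shrinks toward the minimum at 0 on every step.\nx, mu = 3.0, 0.1\nfor _ in range(5):\n    x = x - mu * (2 * x)\nprint(x)  # 0.98304, already much closer to 0\n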
\n# %% [markdown]\n'''\n## 2. Example with sphere fitting\n'''\n# %% [markdown]\n'''\n### 2.1 Cost function\n'''\n# %% [markdown]\n\n# To extract the cost function, we first need to define what a sphere is.\n# The equation of a 3D sphere with radius $r$ and center $(a, b, c)$ is known as:\n# \\begin{equation}\n# r^2 = (x-a)^2 + (y-b)^2 + (z-c)^2\n# \\end{equation}\n\n# Every point $(x,y,z)$ must satisfy this equation to be on the sphere; if not, the point is outside the sphere.\n# Using this condition, we can deduce the energy function:\n# \\begin{equation}\n# \\xi(\\Theta) = \\sum_i^n(L_i - r)^2\n# \\end{equation}\n# with,\n# \\begin{equation}\n# L_i = \\sqrt{(x_i-a)^2 + (y_i-b)^2 + (z_i-c)^2}\n# \\end{equation}\n# This function checks how well every known point $i$ fits the parameters $a, b, c$ and $r$.\n# Let's implement it in Python!\n# %%\n## imports\nimport numpy as np\nimport math\nimport plotly.graph_objects as go\n\n# fixing numpy random state for reproducibility\nnp.random.seed(0)\n# %%\ndef sph_loss(T, x):\n    L = np.sqrt((x[:,0] - T[0])**2 + (x[:,1] - T[1])**2 + (x[:,2] - T[2])**2)\n    return L\n# %%\ndef cost_function(T, x):\n    L = sph_loss(T, x)\n    return np.sum( (L - T[3])**2 )\n# %% [markdown]\n'''\n### 2.2 Gradient of the cost function\n'''\n# %% [markdown]\n\n# Now that we have the cost function, we can compute its gradient for every parameter (a, b, c, r). \n# \\begin{equation}\n# \\nabla\\xi(\\Theta) =\n# \\begin{bmatrix}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta r} \\\\\n# \\frac{\\delta \\xi(\\Theta)}{\\delta a} \\\\\n# \\frac{\\delta \\xi(\\Theta)}{\\delta b} \\\\\n# \\frac{\\delta \\xi(\\Theta)}{\\delta c} \n# \\end{bmatrix}\n# \\end{equation}\n\n# Then, with $m$ as the number of 3D points:\n\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta r} = -2\\sum_{i=1}^m (L_i -r)\n# \\end{equation}\n\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta a} = 2\\sum_{i=1}^{m}((x_i-a) + r\\frac{\\delta L_i}{\\delta a}); \\qquad \\frac{\\delta L_i}{\\delta a} = \\frac{a-x_i}{L_i}\n# \\end{equation}\n\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta b} = 2\\sum_{i=1}^{m}((y_i-b) + r\\frac{\\delta L_i}{\\delta b}); \\qquad \\frac{\\delta L_i}{\\delta b} = \\frac{b-y_i}{L_i}\n# \\end{equation}\n\n# \\begin{equation}\n# \\frac{\\delta \\xi(\\Theta)}{\\delta c} = 2\\sum_{i=1}^{m}((z_i-c) + r\\frac{\\delta L_i}{\\delta c}); \\qquad \\frac{\\delta L_i}{\\delta c} = \\frac{c-z_i}{L_i}\n# \\end{equation}\n\n# %% [markdown]\n# In python,\n\n# %%\n## cost function derivative\n# note: with the signs arranged as above, this returns the negated gradient of\n# the cost, so the gradient-descent update below *adds* it to T\ndef derivative_cost_function(T, x):\n    L = sph_loss(T, x)\n    \n    dr = (-1)*(-2)*np.sum( (L - T[3]) )\n    \n    dLa = (T[0] - x[:,0])/L\n    da = 2*np.sum( (x[:,0] - T[0]) + T[3]*dLa )\n    \n    dLb = (T[1] - x[:,1])/L\n    db = 2*np.sum( (x[:,1] - T[1]) + T[3]*dLb )\n    \n    dLc = (T[2] - x[:,2])/L\n    dc = 2*np.sum( (x[:,2] - T[2]) + T[3]*dLc )\n    \n    return np.array([da, db, dc, dr])\n# %% [markdown]\n'''\n### 2.3 Gradient descent\n'''\n# %% [markdown]\n'''\nUsing the gradient of the cost function, it is now possible to optimize for the best parameters with gradient descent.\n'''\n# %%\ndef grad_descent(data, param_init):\n    T = param_init  # initial guess\n    lr = 5e-3\n    it_max = 10000\n    grad = np.array([1e99, 1e99, 1e99, 1e99])\n\n    for it in range(it_max): \n        if np.abs(grad).sum() < 1e-6:\n            break\n        # gradient step (grad already carries the descent sign, see above)\n        grad = derivative_cost_function(T, data)\n        T = T + lr*grad\n        it_max = it\n    print(\"Done in %d epochs with grad: [%1.4e, %1.4e, %1.4e, %1.4e]\" %(it_max, grad[0], grad[1], grad[2], grad[3]))\n    \n    return T, grad\n
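\n# %% [markdown]\n'''\nAs a quick sanity check (an addition to the original walkthrough), the analytic\nderivatives can be compared against a central finite-difference approximation of\nthe cost gradient. Since derivative_cost_function returns the negated gradient,\nthe two should agree up to a sign flip.\n'''\n# %%\ndef numerical_gradient(T, x, eps=1e-5):\n    # central finite differences of cost_function w.r.t. each parameter\n    grad = np.zeros(len(T))\n    for i in range(len(T)):\n        Tp = np.array(T, dtype=float)\n        Tm = np.array(T, dtype=float)\n        Tp[i] += eps\n        Tm[i] -= eps\n        grad[i] = (cost_function(Tp, x) - cost_function(Tm, x)) / (2*eps)\n    return grad\n\n_check_points = np.random.randn(20, 3) + np.array([1., 2., 4.])\n_T_check = np.array([0., 0., 0., 5.])\nprint(np.allclose(derivative_cost_function(_T_check, _check_points),\n                  -numerical_gradient(_T_check, _check_points), rtol=1e-4, atol=1e-6))  # True\n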
# %% [markdown]\n'''\n### 2.4 Training phase\n'''\n# %% [markdown]\n'''\nWe first generate data for a standard 3D sphere: using spherical coordinates, we can sample points given the sphere parameters. To reduce CPU time, we will not use a high number of points.\n'''\n# %%\ndef gen_points_from_sph(model, n):\n    res = int(np.ceil(math.sqrt(n)))\n    theta = np.linspace(0,2*np.pi,res)\n    phi = np.linspace(0,np.pi,res)\n    \n    x = model[3]*np.outer(np.cos(theta),np.sin(phi))\n    y = model[3]*np.outer(np.sin(theta),np.sin(phi))\n    z = model[3]*np.outer(np.ones(res),np.cos(phi))\n    \n    X = x + model[0]\n    Y = y + model[1]\n    Z = z + model[2]\n    \n    points = np.zeros((n, 3))\n    points[:,0] = X.ravel()[0:n]\n    points[:,1] = Y.ravel()[0:n]\n    points[:,2] = Z.ravel()[0:n]\n\n    return points, X, Y, Z\n# %% [markdown]\n'''\nLet's generate 150 points with Gaussian noise from an unknown sphere centered at (1, 2, 4) with a radius of 10.\nWe will then use a third of the data to try to recover these parameters. Remember that the hyper-parameters for the gradient descent (such as the learning rate) are supposed to be tuned during the training phase.\n'''\n# %%\n#Training phase\nsph_model = [1, 2, 4, 10]\nn = 150\nsph_points = gen_points_from_sph(sph_model, n)[0] + np.random.randn(n,3)\nsph_points_train = sph_points[0:int(n/3),:]\n\nparam_init = np.array([0, 0, 0, 10]) #initial guess\n\nparam = grad_descent(sph_points_train, param_init)[0]\n\nprint(param)\n# %% [markdown]\n'''\nThe optimization returned a sphere centered at (1.21, 2.09, 3.94) with 9.94 radius.\n'''\n# %% [markdown]\n'''\n### 2.5 Testing phase\n'''\n# %% [markdown]\n\n# Now that we have an estimate of the model, we can test it and see how well this model fits the data. We use the remaining two thirds of the points to estimate the error of the model.\n\n# The error estimation can be done using the fitting function $\\xi(\\Theta)$.\n\n# %%\n#Testing phase\nsph_points_test = sph_points[int(n/3)::,:]\nmodel_error = cost_function(param, sph_points_test)/sph_points_test.shape[0]\n\nprint(\"Model has an error of %.2f per point\"%model_error)\n# %% [markdown]\n'''\nThe error per point is quite high. But considering the noise that was introduced, we see that the algorithm performs quite well.\n'''
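\n# %%\n# Illustrative follow-up (not in the original notebook): since the generating\n# sphere is known here, the fitted parameters can also be compared to it directly.\nprint(\"absolute parameter error (a, b, c, r):\",\n      np.abs(np.array(param) - np.array(sph_model)))\n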
\n# %%\nmodel_error = cost_function(sph_model, sph_points_test)/sph_points_test.shape[0]\n\nprint(\"Noise of the data %.2f per point\"%model_error)\n\n# %% [markdown]\n'''\nIt is by using these errors that we can optimize the hyper-parameters (the optimization parameters) and find which ones are best suited to the data.\n'''\n# %% [markdown]\n'''\n### 2.6 Qualitative result\n'''\n# %% [markdown]\n'''\nWe will use [plotly](https://plot.ly/python/) to render the result.\n'''\n# %%\n## Qualitative results with plotly\n_, X, Y, Z = gen_points_from_sph(param, 5*n)\n \ntrace = go.Surface(\n    x=X,\n    y=Y,\n    z=Z,\n    showscale=False,\n    opacity = 0.5,\n    colorscale=[[0, 'rgb(50,50,125)'], [1, 'rgb(50,50,125)']])\n\ntrace2=go.Scatter3d(\n    x=sph_points[:,0].ravel(),\n    y=sph_points[:,1].ravel(),\n    z=sph_points[:,2].ravel(),\n    mode='markers',\n    name='points',\n    marker=go.scatter3d.Marker(symbol='circle',\n                               size=3,\n                               color='rgb(0,0,255)',\n                               opacity=1),)\n\nlayout = go.Layout(\n    title=\"Sphere fitting\",\n    scene=go.layout.Scene(\n        aspectmode = \"data\",\n        xaxis_title=\"x(mm)\",\n        yaxis_title=\"y(mm)\",\n        zaxis_title=\"z(mm)\", \n        camera=dict(center=dict(x=0.1, y=0.1, z=0))))\n\nfig = go.Figure(data=[trace, trace2], layout=layout)\nfig.show(renderer=\"iframe_connected\", config={'showLink': False})\n# %% [markdown]\n'''\n## To go further\n'''\n# %% [markdown]\n'''\nYou can look at other examples for standard mathematical models [here](https://www.geometrictools.com/Documentation/LeastSquaresFitting.pdf).\n'''\n# %% [markdown]\n'''\n## Tags\n'''\n# %% [markdown]\n'''\nData-Science; Geometry; Optimization\n'''","repo_name":"ltetrel/ltetrel.github.io","sub_path":"notebooks/sphere_fitting.py","file_name":"sphere_fitting.py","file_ext":"py","file_size_in_byte":10257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"25797781300","text":"import os\n\nfrom movie_search.settings import POSTER_ROOT\n\n\nclass FileManage(object):\n    def __init__(self, filename):\n        self.filename = filename\n\n    def upload(self, input_file):\n        filename = os.path.basename(self.filename)\n        filename = filename.replace(' ', '_')\n        file_path = os.path.join(POSTER_ROOT, filename)\n        if os.path.exists(file_path):\n            filename = self.get_unique_filename(filename)\n            file_path = os.path.join(POSTER_ROOT, filename)\n        with open(file_path, 'wb') as output_file:\n            for chunk in input_file:\n                output_file.write(chunk)\n        return filename\n\n    def remove(self):\n        file_path = os.path.join(POSTER_ROOT, self.filename)\n        if os.path.exists(file_path):\n            os.remove(file_path)\n            return True\n        return False\n\n    def get_unique_filename(self, filename):\n        new_name = ''\n        counter = 0\n        while True:\n            new_name = filename.split('.')[0] + str(counter) + '.' 
+ filename.split('.')[-1]\n file_path = os.path.join(POSTER_ROOT, new_name)\n counter += 1\n if not os.path.exists(file_path):\n break\n return new_name\n\n def get_length(self):\n file_path = os.path.join(POSTER_ROOT, self.filename)\n if os.path.exists(file_path):\n return os.path.getsize(file_path)\n return\n","repo_name":"olegkovalov/movie_search","sub_path":"movie_search/utils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26736400147","text":"from urllib.request import FancyURLopener\nimport urllib.request\nimport urllib.parse \nimport re\n\nclass MyOpener(FancyURLopener):\n version = 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)'\n\ndef connect_to_dictionary(url):\n if not url:\n return None\n myopener = MyOpener()\n try:\n page = myopener.open(url)\n except:\n return None\n return page\n\ndef text_url(from_lang, to_lang, text):\n url = 'http://translate.google.bg/translate_t?prev=hp&hl=bg&js=y&text=' + urllib.parse.quote_plus(text) + '%0D%0A&file=&sl=' + from_lang + '&tl=' + to_lang + '&history_state0=#' \n return url\n\ndef word_url(from_lang, to_lang, word):\n address = 'http://www.google.bg/dictionary?source=translation&hl=' + to_lang + '&q=' + urllib.parse.quote_plus(word) + '&langpair=' + from_lang + '|' + to_lang\n return address\n\ndef urban_dict_url(from_lang, to_lang, word):\n address = 'http://www.urbandictionary.com/define.php?term=' + urllib.parse.quote_plus(word)\n return address\n\ndef find_from_urban_dict(from_lang, to_lang, word):\n print(\"using urban dict\")\n conn = connect_to_dictionary(urban_dict_url(from_lang, to_lang, word))\n s = ''.join((_.decode() for _ in conn.readlines()))\n match = re.search(r'(?m)(?s)
(?P<definition>.*?)',s)\n    if not match:\n        return word + find_translation(from_lang, to_lang, word)\n    return word + find_translation(from_lang, to_lang, re.sub(r'<br>', '\\n', match.group('definition')))\n\n\ndef find_word(from_lang, to_lang, word):\n    conn = connect_to_dictionary(word_url(from_lang, to_lang, word))\n    s = ''.join((_.decode() for _ in conn.readlines()))\n    match = re.search(r'(?m)(?P<definition>.*?)', s)\n    if not match:\n        match = re.search(r'(?P<definition>.*?)', s)\n        if not match: \n            return find_from_urban_dict(from_lang, to_lang, word)\n        match = re.search(r'(?m)\\n(?P<definition>.*?)$', s)\n        return word + match.group('definition')\n\n    find_transcription = re.search(r'(?m)(?P<definition>(\\[|/).*?)$', s)\n    if find_transcription:\n        word = match.group('definition').upper() + ' ' + find_transcription.group('definition')\n    else: \n        word = match.group('definition').upper()\n\n    l = [word]\n    s = s[match.end():]\n    index = 1\n\n    while True:\n        match = re.search(r'(?m)(?P<class>|title=\"Part-of-speech\">)(?P<definition>(.|\\n)+?)(?P<end>$|)', s)\n        if not match: break\n        if match.group('class') == 'title=\"Part-of-speech\">':\n            if s[(match.end())%len(s)] != ':':\n                l.append(match.group('definition'))\n            else:\n                l.append('Synonymous ' + match.group('definition') + 's')\n            index = 1\n        else:\n            if match.group('end'):\n                tmp = re.search(r'dict_lk\">(?P<definition>.*)',match.group('definition'))\n                if tmp: \n                    l.append(str(index) + '. ' + tmp.group('definition'))\n                else :\n                    l.append(str(index) + '. ' + match.group('definition'))\n                index += 1\n            else:\n                print(match.group('definition'))\n                l.append('● ' + match.group('definition'))\n        s = s[match.end():]\n#    l.append('proba :)')\n    return ''.join(l)\n    \n\ndef find_translation(from_lang, to_lang, text):\n    conn = connect_to_dictionary(text_url(from_lang, to_lang, text))\n    s = ''.join((_.decode() for _ in conn.readlines()))\n    \n    res = text\n\n    match = re.search(r'(?P<class>  1. |)(?P<definition>.*?)  2. ', s)\n    \n    if not match:\n        match = re.search(r'overflow:auto\">(?P<definition>.*?)', s)\n        if not match:\n            return res + 'Could not find definition'\n        return re.sub('<br>', '\\n    ', match.group('definition'))\n    index = 1\n\n    while match:\n        if match.group('class') == '  1. ':\n            res += str(index) + '. ' + match.group('definition')\n            index += 1\n        else:\n            res += match.group('definition')\n            index = 1\n        \n        s = s[match.end():]\n        match = re.search(r'(?P<class>  1. |)(?P<definition>.*?)  2. ', s)\n\n    return res \n    \n","repo_name":"ivajloip/IMDict","sub_path":"search_engine.py","file_name":"search_engine.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"4023898937","text":"# fft the shit out of everything\n\nfrom physics import mass_streamfunction\nfrom data_handling import time_means\nimport matplotlib.pyplot as plt\nimport xarray as xr\nimport numpy as np\nfrom scipy.fftpack import fft\n\n\nL = 2.500e6\ncp = 287.04/(2./7.)\ng = 9.8\nstefan = 5.6734e-8\n\ndef fft_max(var, nmax=True, period_fac=1., plttitle=None):\n    if nmax:\n        var_max_loc = var.lat.values[np.argmax(var.mean('lon').values, axis=1)].tolist()\n    else:\n        var_max_loc = var.lat.values[np.argmin(var.mean('lon').values, axis=1)].tolist()\n    \n    # Number of sample points\n    N = int(72*period_fac)\n    yf = fft(var_max_loc)\n\n    plt.plot( 2.0/N * np.abs(yf[:N//2]))\n    plt.xlim(0,35)\n    plt.xlabel('Wavenumber')\n    plt.ylabel('Amplitude')\n    plt.title(plttitle)\n    plt.savefig('/scratch/rg419/plots/seasons_and_rotation/'+plttitle+'_fft.png')\n    plt.close()\n    \n    plt.plot(var_max_loc)\n    plt.xlabel('Pentad')\n    plt.ylabel('Latitude')\n    plt.title(plttitle)\n    plt.savefig('/scratch/rg419/plots/seasons_and_rotation/'+plttitle+'_mean.png')\n    plt.close()\n    \n\ndata = time_means('sn_3.000',[121,481], filename='atmos_pentad', timeav='pentad', period_fac=3.)\nfft_max(data.omega[:,27,:,:], nmax=False, period_fac=3., plttitle='sn_3.000_omega')\nmse = (cp*data.temp + L*data.sphum + g*data.height)/1000.\nfft_max(mse[:,38,:,:], period_fac=3., plttitle='sn_3.000_mse')\n \ndata = time_means('aquaplanet_10m',[121,481], filename='atmos_pentad', timeav='pentad')\nfft_max(data.omega[:,27,:,:], nmax=False, plttitle='aquaplanet_10m_omega')\nmse = (cp*data.temp + L*data.sphum + g*data.height)/1000.\nfft_max(mse[:,38,:,:], plttitle='aquaplanet_10m_mse')\n\ndata = time_means('aquaplanet_2m',[121,481], filename='atmos_daily', timeav='pentad')\nfft_max(data.omega[:,27,:,:], nmax=False, plttitle='aquaplanet_2m_omega')\nmse = (cp*data.temp + L*data.sphum + g*data.height)/1000.\nfft_max(mse[:,38,:,:], plttitle='aquaplanet_2m_mse')\n","repo_name":"subond/python_scripts","sub_path":"seasons_and_rotation/fourier_max.py","file_name":"fourier_max.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
{"seq_id":"71731561681","text":"import tkinter as tk\nfrom tkinter import ttk\n\ndef main_window():\n    root = tk.Tk() # makes base window\n    title_label = ttk.Label(root, text=\"New Patient\")\n    title_label.grid(column=1, row=1, sticky='w')\n    mrnlabel = ttk.Label(root, text=\"Enter patient MRN: \")\n    mrnentry = tk.Entry(root)\n    mrnentry.grid(column=3, row=3, columnspan=2)\n    mrnlabel.grid(column=1, row=3, sticky='w')\n    testlabel = ttk.Label(root, text=\"Choose test: \")\n    testlabel.grid(column=1, row=5, sticky='w')\n    for i in [0,2,5]:\n        root.columnconfigure(i, minsize=30)\n    for i in [0,2,4,6]:\n        root.rowconfigure(i, minsize=30)\n\n    root.mainloop() # activate and show window\n\nif __name__ == \"__main__\":\n    main_window()","repo_name":"slramirez02/First","sub_path":"patient_lab_gui.py","file_name":"patient_lab_gui.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
{"seq_id":"13205140707","text":"import json\nfrom urllib.parse import urlparse\nimport sqlite3\n\nfrom bertopic import BERTopic\n\n# from 
sentence_transformers import SentenceTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# from hdbscan import HDBSCAN\n# from umap import UMAP\n\n\n# def preprocess(docs):\n#     cleaned_docs = [preprocess_string(d, CUSTOM_FILTERS) for d in docs]\n#     cleaned_docs = [[lemmatizer.lemmatize(s) for s in t] for t in cleaned_docs]\n\n#     return cleaned_docs\n\n\ndef bert_model(news=None):\n    vectorizer_model = CountVectorizer(\n        ngram_range=(1, 2), strip_accents=\"ascii\", stop_words=\"english\"\n    )\n    # embedding_model = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n    # umap_model = UMAP(n_neighbors=5, n_components=15)\n    # hdbscan_model = HDBSCAN(gen_min_span_tree=True, prediction_data=True)\n\n    model = BERTopic(\n        # hdbscan_model=hdbscan_model,\n        # embedding_model=embedding_model,\n        # umap_model=umap_model,\n        vectorizer_model=vectorizer_model,\n        top_n_words=10,\n        language=\"english\",\n        calculate_probabilities=True,\n        verbose=True,\n    )\n\n    if news is None:\n        try:\n            # Connect to the database\n            conn = sqlite3.connect('carbonation/resources/carbonaton.db')\n\n            # Retrieve the most recent row\n            cursor = conn.execute(\"SELECT data FROM models ORDER BY id DESC LIMIT 1\")\n            latest_data = cursor.fetchone()[0]\n\n            # Close the connection\n            conn.close()\n\n        except sqlite3.Error as e:\n            print(f\"An error occurred: {e}\")\n\n        news = json.loads(latest_data)\n\n    # Enrich docs with bias\n    news = enrich_docs(news)\n\n    docs = []\n    links = []\n    for a in news[\"articles\"]:\n        if a[\"excerpt\"] is not None:\n            docs.append(\": \".join([a[\"title\"], a[\"excerpt\"]]))\n            links.append(\n                {\"domain\": a[\"clean_url\"], \"link\": a[\"link\"], \"bias\": a[\"bias\"]}\n            )\n\n    # Remove dupes\n    # docs = list(set(docs))\n    # docs = preprocess(docs)\n\n    # Make model\n    topics, probs = model.fit_transform(docs)\n\n    # Make resultant json\n    model, topic_docs = create_topic_docs(model, topics, probs, docs, links)\n\n    return (model, topic_docs)\n\n\ndef create_topic_docs(model, topics, probs, docs, links):\n    topic_docs = {\n        topic: {\"topic\": model.get_topic(topic), \"docs\": []} for topic in set(topics)\n    }\n\n    for topic, doc, prob, link in zip(topics, docs, probs, links):\n        if max(prob) > 0.25:\n            topic_docs[topic][\"docs\"].append(\n                {\"text\": doc, \"prob\": sorted(prob.tolist())[-5:][::-1], **link}\n            )\n\n    for topic, content in topic_docs.items():\n        num_docs = len(content[\"docs\"])\n        topic_docs[topic][\"num_docs\"] = num_docs\n        topic_docs[topic][\"avg_bias\"] = 0\n\n        if num_docs > 0:\n            avg_bias = sum([doc[\"bias\"] for doc in content[\"docs\"]]) / num_docs\n            topic_docs[topic][\"avg_bias\"] = avg_bias\n\n    # topic_docs = sorted(topic_docs, key=lambda x: len(topic_docs[x]['docs']))\n\n    # f\"resources/computed/bert_{'_'.join(news_json.split('_')[1:])}\", \"w\"\n    with open(\"carbonation/resources/computed/bert_test.json\", \"w\") as f:\n        f.write(json.dumps(topic_docs))\n\n    return (model, topic_docs)\n\n\ndef enrich_docs(news):\n    # news = json.load(open(f\"resources/articles/{news_json}\"))\n\n    with open(\"carbonation/resources/bias/mbfc_bias.json\", \"r\") as f:\n        mbfc_bias = json.load(f)\n\n    mbfc_bias = {k: v[\"b\"] for (k, v) in mbfc_bias.items()}\n\n    with open(\"carbonation/resources/bias/allsides_bias.json\", \"r\") as f:\n        allsides_bias = json.load(f)[\"allsides_media_bias_ratings\"]\n\n    # For AllSides compose dict\n    allsides_bias = {\n        \".\".join(urlparse(s[\"source_url\"]).netloc.split(\".\")[-2:]): s[\n            \"media_bias_rating\"\n        ]\n        for s in allsides_bias\n        if s[\"source_url\"] != \"\"\n    }\n\n    
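# \"|\" below is the dict-union operator (PEP 584), so this module assumes Python 3.9+;\n    # for domains present in both maps the right-hand AllSides rating wins, e.g.\n    # {\"x.com\": \"L\"} | {\"x.com\": \"Lean Left\"} -> {\"x.com\": \"Lean Left\"}\n    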
bias = mbfc_bias | allsides_bias\n\n    bias_scale = {\n        # \"CP\": \"Conspiracy-Pseudoscience\",\n        # \"FN\": \"Questionable Sources\",\n        # \"PS\": \"Pro-Science\",\n        # \"S\": \"Satire\",\n        \"L\": -1,\n        \"Left\": -1,\n        \"LC\": -0.5,\n        \"Lean Left\": -0.5,\n        \"C\": 0,\n        \"Center\": 0,\n        \"Mixed\": 0,\n        \"RC\": 0.5,\n        \"Lean Right\": 0.5,\n        \"R\": 1,\n        \"Right\": 1,\n    }\n\n    for a in news[\"articles\"]:\n        a[\"bias\"] = 0\n\n        domain = a.get(\"clean_url\", \"\")\n        if domain in bias:\n            a[\"bias\"] = bias_scale.get(bias[domain], 0)\n\n    return news\n","repo_name":"jacksongoode/carbonation","sub_path":"carbonation/analysis/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39878925083","text":"\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nG = nx.DiGraph()\nV = {1, 2, 3, 4, 5, 6}\nE = [(1, 3), (4, 1), (4,2), (4, 5), (4, 6), (6, 1), (6, 2), (6, 4)]\nG.add_nodes_from(V)\nG.add_edges_from(E)\nprint(G.in_degree())\n\n# 1. in-degrees and out-degrees of node 2 and 6\nI2 = G.in_degree(2)\nI6 = G.in_degree(6)\nO2 = G.out_degree(2)\nO6 = G.out_degree(6)\nprint(\"\\n #1. in-degrees and out-degrees of node 2 and 6 are \", I2, \", \", I6, \", \", O2, \", \", O6, \" respectively\", \"\\n\")\n# 2. What is the shortest path from node 4 to 3?\nS = nx.shortest_path(G, 4, 3)\nprint(\"\\n #2. the shortest path from node 4 to 3 is \", S, \"\\n\")\n# 3. Is the graph a strongly connected graph?\nSC = nx.is_strongly_connected(G)\nprint(\"\\n #3. is this graph strongly connected? :\", SC, \"\\n\")\n# 4. What is the largest subgraph that is strongly connected?\nlargest = max(nx.strongly_connected_components(G), key=len)\nprint(\"\\n #4. largest strongly connected subgraph is\", largest, \"\\n\")\n# 5. Is the graph a directed acyclic graph?\nDAG = nx.is_directed_acyclic_graph(G)\nprint(\"\\n #5. Is the graph a directed acyclic graph?: \", DAG, \"\\n\")\n# 6. What is the diameter of this graph?\n# Dm = nx.diameter(G)\nprint(\"\\n #6. the diameter of this graph is infinite: the digraph is not strongly connected, so some node pairs have no connecting path\", \"\\n\")\n# 7. What is the edge list representation of G(E, V)?\nprint(\"\\n #7. edge list of network is \", list(G.edges()))\n# 8. What is the adjacency list of G(E, V)?\nprint(\"\\n #8. adjacency list of network is \", G.adj, \"\\n\")\n# 9. What is the adjacency matrix A of G(E, V)?\nA = nx.to_numpy_matrix(G)\nprint(\"\\n #9. the adjacency matrix of graph is\",\"\\n\", A, \"\\n\")\n\n# 11. What is the relationship between matrices A, B, and E?\nprint(\"\\n #11. the product of the out-edge incidence matrix and the transpose of the in-edge incidence matrix gives the adjacency matrix\", \"\\n\")\n\n# 12. Please draw the line graph of G(E, V).\nplt.figure(1)\n# generating the line graph from the directed graph G\nH = nx.line_graph(G)\nnx.draw_networkx(H, arrows = True, arrowsize=10, node_size=100, with_labels = True)\nplt.show()\n\n# 13. What is the adjacency matrix M of the line graph of G(E, V)?\nAL = nx.to_numpy_matrix(H)\nprint(\"\\n #13. the adjacency matrix of line graph is\",\"\\n\", AL, \"\\n\")\n\n
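# A quick numerical check of #11 and #14 (illustrative sketch; E_out/E_in are built here\n# as an example and are not part of the original assignment):\n# E_out[v][e] = 1 if edge e leaves node v; E_in[v][e] = 1 if edge e enters node v.\nE_out = np.zeros((len(V), len(E)))\nE_in = np.zeros((len(V), len(E)))\nfor j, (u, v) in enumerate(E):\n    E_out[u - 1, j] = 1\n    E_in[v - 1, j] = 1\nprint((A == E_out @ E_in.T).all())   # A = E_out * E_in^T (the relation stated in #11)\nM = nx.to_numpy_matrix(H, nodelist=E)\nprint((M == E_in.T @ E_out).all())   # M = E_in^T * E_out (the relation stated in #14)\n\n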
# 14. What is the relationship between the matrices M, B, and E?\nprint(\" \\n #14. the product of the transpose of the in-edge incidence matrix and the out-edge incidence matrix is equal to M\")","repo_name":"pravinpoudel/ML-with-graph","sub_path":"Assignment 1/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33685089860","text":"# with the default parameters, the estimated run time is 15 hours on free colab\nimport msprime\nfrom pathlib import Path\n\n# set parameters\ntrial_number= 90000 # num of trials\nv_bin_number= 200 # num of bins\nsample_size=2 # num of individuals\nv_ploidy=1 # num of chromosomes of each type\ncombined_parameters= [300, 100, 30, 10, 1, 0] # sequence_length*recombination_rate*population_size\nsequence_lengths= [100] # sequence length\npopulation_sizes= [round(10*10**(idx/4)) for idx in range(9)] # population size\nparameter_packages= [[combined_parameter, v_sequence_length, v_population_size,\n                      combined_parameter/v_sequence_length/v_population_size]\n                     for combined_parameter in combined_parameters\n                     for v_sequence_length in sequence_lengths\n                     for v_population_size in population_sizes]\n# the third entry is recombination_rate, being the num of recombinations per base per gen\n# relevant_recombination_rate=2*sequence_length*recombination_rate\n\n# define the function that finds the least coalescent time\ndef mrca_time_api(tree_sequence):\n    # ca_times= []\n    ca_times= [0. for idx in range(tree_sequence.num_trees)]\n    tree_counter= 0\n    for tree in tree_sequence.trees():\n        # ca_times.append(tree_sequence.tables.nodes[tree.root].time)\n        ca_times[tree_counter]= tree_sequence.tables.nodes[tree.root].time\n        tree_counter= tree_counter+ 1\n    return(min(ca_times))\n\n# define the function that transforms lists to histograms\ndef list_2_histogram(f_list,bin_number):\n    bin_size= max(f_list)/bin_number\n    bin_locations= [(idx+0.5)*bin_size for idx in range(bin_number)]\n    bin_heights= [0 for idx in range(bin_number)]\n    for entry in f_list:\n        bin_idx= int(entry/bin_size)\n        if bin_idx== bin_number:\n            bin_idx= bin_idx-1\n        bin_heights[bin_idx]= bin_heights[bin_idx]+1/len(f_list)/bin_size\n    return [bin_locations, bin_heights]\n\n# save the parameter packages\nsubdirectory= (\"{:.1e}\".format(trial_number)+ \"_\"+\n               \"{:.1e}\".format(v_bin_number)+ \"_400/\")\nPath(subdirectory).mkdir()\nwith open(subdirectory+ \"parameter_packages.txt\", 'w') as txtfile:\n    txtfile.write(str(parameter_packages))\nwith open(subdirectory+ \"combined_parameters.txt\", 'w') as txtfile:\n    txtfile.write(str(combined_parameters))\nwith open(subdirectory+ \"sequence_lengths.txt\", 'w') as txtfile:\n    txtfile.write(str(sequence_lengths))\nwith open(subdirectory+ \"population_sizes.txt\", 'w') as txtfile:\n    txtfile.write(str(population_sizes))\n\n# run the simulation trial_number times for each parameter package\n# and save each histogram\nfor parameter_package in parameter_packages:\n    # mrca_times= []\n    mrca_times= [0. 
for idx in range(trial_number)]\n for idx in range(trial_number):\n tree_sequence_simulated= msprime.sim_ancestry(\n samples= sample_size,\n ploidy= v_ploidy,\n recombination_rate= parameter_package[3],\n sequence_length= parameter_package[1],\n population_size= parameter_package[2],\n record_full_arg= True)\n # mrca_times.append(mrca_time_api(tree_sequence_simulated)/parameter_package[2])\n mrca_times[idx]= mrca_time_api(tree_sequence_simulated)/parameter_package[2]\n histogram= list_2_histogram(mrca_times, v_bin_number)\n # with open(\"parameter_package\"+datetime.now().strftime(\"%H_%M_%S\")+\".txt\", 'w') as txtfile:\n filename= (\"{:.1e}\".format(parameter_package[0])+ \"_\"+\n \"{:.1e}\".format(parameter_package[1])+ \"_\"+\n \"{:.1e}\".format(parameter_package[2])+ \".txt\")\n with open(subdirectory+ filename, 'w') as txtfile:\n txtfile.write(str(histogram))\n","repo_name":"zhaoz0314/2-mrca","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71045733521","text":"from sincnet import (\n SincNet,\n MLP\n)\nfrom models import (\n Cnn10,\n Cnn14,\n create_ResNet50_model,\n ModifiedEfficientNet,\n ASTModel\n)\nfrom datasets import (\n CachedDataset,\n WavDataset\n)\nfrom utils import (\n disaggregated_evaluation,\n evaluate_categorical,\n transfer_features,\n LabelEncoder,\n get_output_dim,\n get_df_from_dataset,\n GrayscaleToRGB\n)\nfrom transformers import (\n ASTFeatureExtractor\n)\n\n\n#from ml_utils import get_sharpness\nfrom torch.utils.tensorboard import SummaryWriter\nimport torchvision\nimport torchvision.transforms as transforms\nimport argparse\nimport audtorch\nimport numpy as np\nimport os\nimport pandas as pd\nimport random\nimport shutil\nimport torch\nimport copy\nimport tqdm\nimport yaml\nfrom KFACPytorch import KFACOptimizer, EKFACOptimizer\nfrom sam import SAM\n#from calculate_different_sharpness_values import calculate_sharpness\nfrom gradient_descent_the_ultimate_optimizer.gdtuo import ModuleWrapper, NoOpOptimizer\nfrom torchinfo import summary\n\nimport warnings\n# Ignore Using backward() UserWarning of gdtuo\nwarnings.filterwarnings(category=UserWarning, action=\"ignore\")\n\n\ndef fix_index(df, root):\n df.reset_index(inplace=True)\n df['filename'] = df['filename'].apply(\n lambda x: os.path.join(root, x))\n df.set_index('filename', inplace=True)\n return df\n\n\ndef replace_file_path(df: pd.DataFrame, col, new_path):\n def repl(st):\n return new_path + \"/\" + st.split(\"/\")[-1]\n df[col] = df[col].apply(repl)\n return df\n\n\nclass Model(torch.nn.Module):\n def __init__(self, cnn, mlp_1, mlp_2, wlen, wshift):\n super().__init__()\n self.cnn = cnn\n self.mlp_1 = mlp_1\n self.mlp_2 = mlp_2\n self.wlen = wlen\n self.wshift = wshift\n self.output_dim = self.mlp_2.fc_lay[-1]\n\n def forward(self, x):\n # x = x.transpose(1, 2)\n if not self.training:\n x = x.unfold(1, self.wlen, self.wshift).squeeze(0)\n out = self.mlp_2(self.mlp_1(self.cnn(x)))\n if not self.training:\n out = out.mean(0, keepdim=True)\n return out\n\n\ndef train_step_gdtuo(model,\n mw: ModuleWrapper,\n criterion,\n features,\n targets,\n device,\n clip_net=1.,\n clip_opt=1.,\n ):\n # * Train Step for GDTUO Optimizer using ModelWrapper\n # ? 
Reference: https://github.com/kach/gradient-descent-the-ultimate-optimizer\n    mw.begin()\n    output = mw.forward(transfer_features(features, device))\n    targets = targets.to(device)\n    loss = criterion(output, targets)\n    mw.zero_grad()\n    loss.backward(create_graph=True) # important! use create_graph=True\n    # * GDTUO needs gradient clipping, a lot of stacked optimizers cause HUUUGE gradients!\n    if clip_net is not None:\n        for param in mw.all_params_with_gradients:\n            _clip = torch.ones_like(param.grad) * clip_net\n            param.grad = torch.minimum(param.grad, _clip)\n            param.grad = torch.maximum(param.grad, -_clip)\n    if clip_opt is not None:\n        opt = mw.optimizer\n        while not isinstance(opt, NoOpOptimizer):\n            for param in opt.parameters.values():\n                _clip = torch.ones_like(param.grad) * clip_opt\n                param.grad = torch.minimum(param.grad, _clip)\n                param.grad = torch.maximum(param.grad, -_clip)\n            opt = opt.optimizer\n    mw.step()\n    _loss = loss.item()\n    # * GDTUO leaks memory, so it needs to be dealt with manually!\n    opt = mw\n    while not isinstance(opt, NoOpOptimizer):\n        if hasattr(opt, \"all_params_with_gradients\"):\n            for param in opt.all_params_with_gradients:\n                param.grad = None\n            opt.all_params_with_gradients.clear()\n        opt = opt.optimizer\n    torch.cuda.empty_cache()\n    return _loss\n\n\ndef train_step_kfac(model, optimizer, criterion, features, targets, device, _epoch, _batch):\n    # * Train Step for (E)KFAC Optimizer\n    # ? Reference: https://github.com/alecwangcq/KFAC-Pytorch \n    optimizer.zero_grad()\n    output = model(transfer_features(features, device))\n    targets = targets.to(device)\n    loss = criterion(output, targets)\n    if optimizer.steps % optimizer.TCov == 0:\n        # compute true fisher\n        optimizer.acc_stats = True\n        with torch.no_grad():\n            sampled_y = torch.multinomial(torch.nn.functional.softmax(output.cpu().data, dim=1),\n                                          1).squeeze().to(device)\n        loss_sample = criterion(output, sampled_y)\n        loss_sample.backward(retain_graph=True)\n        optimizer.acc_stats = False\n        optimizer.zero_grad() # clear the gradient for computing true-fisher.\n    loss.backward()\n    optimizer.step()\n    _loss = loss.item()\n    return _loss\n\n\ndef train_step_normal(model, optimizer, criterion, features, targets, device):\n    # * Train Step for Torch Base Optimizers\n    # print(\"-\"*50)\n    # TODO: Remove this part. 
It's only for testing.\n # sharp = get_sharpness(mode, train_dataset)\n # print(\"Sharpness: \", sharp)\n # print(\"Feature Shapes: \", features.shape)\n output = model(transfer_features(features, device))\n targets = targets.to(device)\n loss = criterion(output, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n _loss = loss.item()\n return _loss\n\ndef train_step_SAM(model, optimizer, criterion, features, targets, device):\n # * Train Step for SAM optimizer\n output = model(transfer_features(features, device))\n targets = targets.to(device)\n # first forward-backward pass\n loss = criterion(output, targets) # use this loss for any training statistics\n loss.backward()\n optimizer.first_step(zero_grad=True)\n \n # second forward-backward pass\n output = model(transfer_features(features, device))\n targets = targets.to(device)\n loss = criterion(output, targets) # make sure to do a full forward pass\n loss.backward()\n optimizer.second_step(zero_grad=True)\n _loss = loss.item()\n return _loss\n\n\ndef run_training(args):\n def _get_device_multiprocessing(device):\n torch.cuda.set_device(torch.cuda.device(device))\n return \"cuda:\"+str(torch.cuda.current_device())\n args.device = args.device if isinstance(\n args.device, str) else _get_device_multiprocessing(args.device)\n torch.manual_seed(args.seed)\n gen_seed = torch.Generator().manual_seed(args.seed)\n np.random.seed(args.seed)\n random.seed(args.seed)\n device = args.device\n epochs = args.epochs\n experiment_folder = args.results_root\n os.makedirs(experiment_folder, exist_ok=True)\n ### DCASE\n if args.dataset == 'DCASE2020':\n def get_scene_category(x):\n if x in [\n 'airport',\n 'shopping_mall',\n 'metro_station'\n ]:\n return 'indoor'\n elif x in [\n 'park',\n 'public_square',\n 'street_pedestrian',\n 'street_traffic'\n ]:\n return 'outdoor'\n elif x in [\n 'bus',\n 'metro',\n 'tram'\n ]:\n return 'transportation'\n else:\n raise NotImplementedError(f'{x} not supported.')\n #print(\"-----------------hi---------------\")\n # old\n # df_train = pd.read_csv(\n # os.path.join(\n # args.data_root,\n # 'evaluation_setup',\n # 'fold1_train.csv'\n # ), sep='\\t').set_index('filename')\n \n # new\n df_train = pd.read_csv(\n os.path.join(\n args.data_root,\n 'evaluation_setup',\n 'fold1_train.csv'\n ), sep='\\t')\n df_train['filename'] = df_train['filename'].apply(lambda x: os.path.join(args.data_root, x))\n df_train = df_train.set_index('filename')\n\n\n df_train['scene_category'] = df_train['scene_label'].apply(\n get_scene_category)\n df_train['city'] = [\n os.path.basename(x).split('-')[1]\n for x in df_train.index.get_level_values('filename')\n ]\n df_train['device'] = [\n os.path.basename(x).split('-')[-1].split('.')[0]\n for x in df_train.index.get_level_values('filename')\n ]\n \n df_dev = pd.read_csv(\n os.path.join(\n args.data_root,\n 'evaluation_setup',\n 'fold1_evaluate.csv'\n ), sep='\\t')\n df_dev['filename'] = df_dev['filename'].apply(lambda x: os.path.join(args.data_root, x))\n df_dev = df_dev.set_index('filename')\n df_dev['scene_category'] = df_dev['scene_label'].apply(get_scene_category)\n df_dev['city'] = [\n os.path.basename(x).split('-')[1]\n for x in df_dev.index.get_level_values('filename')\n ]\n df_dev['device'] = [\n os.path.basename(x).split('-')[-1].split('.')[0]\n for x in df_dev.index.get_level_values('filename')\n ]\n \n df_test = pd.read_csv(\n os.path.join(\n args.data_root,\n 'evaluation_setup',\n 'fold1_evaluate.csv'\n ), sep='\\t')\n df_test['filename'] = 
df_test['filename'].apply(lambda x: os.path.join(args.data_root, x))\n        df_test = df_test.set_index('filename')\n\n        df_test['scene_category'] = df_test['scene_label'].apply(\n            get_scene_category)\n        df_test['city'] = [\n            os.path.basename(x).split('-')[1]\n            for x in df_test.index.get_level_values('filename')\n        ]\n        df_test['device'] = [\n            os.path.basename(x).split('-')[-1].split('.')[0]\n            for x in df_test.index.get_level_values('filename')\n        ]\n\n        if args.category is not None:\n            df_train = df_train.loc[df_train['scene_category'] == args.category]\n            df_dev = df_dev.loc[df_dev['scene_category'] == args.category]\n            df_test = df_test.loc[df_test['scene_category'] == args.category]\n\n        if args.exclude_cities != \"None\":\n            df_train = df_train.loc[~df_train[\"city\"].isin([args.exclude_cities])]\n\n        n_classes = len(df_train['scene_label'].unique())\n        encoder = LabelEncoder(\n            list(df_train['scene_label'].unique()))\n        # old\n        #features = pd.read_csv(args.features).set_index('filename')\n        # new\n        features = pd.read_csv(args.features)\n        features['filename'] = features['filename'].apply(lambda x: os.path.join(args.data_root, x))\n        features = features.set_index('filename')\n\n\n        # * custom feature path support\n        if args.custom_feature_path is not None:\n            features = replace_file_path(\n                features, \"features\", args.custom_feature_path)\n\n        db_args = {\n            'features': features,\n            'target_column': 'scene_label',\n            'target_transform': encoder.encode,\n            'feature_dir': args.feature_dir\n        }\n        \n        if args.approach == 'cnn14':\n            model = Cnn14(\n                output_dim=n_classes\n            )\n            print(\"Pretrained: \", args.pretrained)\n            if args.pretrained:\n                # model_old = copy.deepcopy(model)\n                # sd = model_old.state_dict()\n                # for params in sd:\n                #     print(params, sd[params]) \n                checkpoint = torch.load(args.pretrained_dir + \"Cnn14_16k_mAP=0.438.pth\", map_location=torch.device(args.device))\n                state_dict = checkpoint['model']\n                model.load_state_dict(state_dict, strict=False)\n            db_class = CachedDataset\n            model.to_yaml(os.path.join(experiment_folder, 'model.yaml'))\n            criterion = torch.nn.CrossEntropyLoss()\n        \n        elif args.approach == 'cnn10':\n            model = Cnn10(\n                output_dim=n_classes\n            )\n            print(\"Pretrained: \", args.pretrained)\n            if args.pretrained:\n                # model_old = copy.deepcopy(model)\n                # sd = model_old.state_dict()\n                # for params in sd:\n                #     print(params, sd[params]) \n                checkpoint = torch.load(args.pretrained_dir + \"Cnn10_mAP=0.380.pth\", map_location=torch.device(args.device))\n                state_dict = checkpoint['model']\n                model.load_state_dict(state_dict, strict=False)\n                # sd2 = model.state_dict()\n                # for params in sd2:\n                #     print(params, sd2[params]) \n            db_class = CachedDataset\n            model.to_yaml(os.path.join(experiment_folder, 'model.yaml'))\n            criterion = torch.nn.CrossEntropyLoss()\n        elif args.approach == 'resnet50':\n            model = create_ResNet50_model(n_classes, pretrained=args.pretrained)\n            db_class = CachedDataset\n            db_args['transform'] = transforms.Compose([GrayscaleToRGB()])\n            criterion = torch.nn.CrossEntropyLoss()\n\n        elif args.approach.startswith(\"efficientnet\"):\n            model = ModifiedEfficientNet(n_classes, scaling_type=args.approach, pretrained=args.pretrained)\n            db_class = CachedDataset\n            # db_args['transform'] = transforms.Compose([GrayscaleToRGB(), transforms.ToTensor()])\n            db_args['transform'] = transforms.Compose([GrayscaleToRGB()])\n            # db_args['transform'] = torch.nn.Sequential(GrayscaleToRGB())\n            #db_args['transform'] = torch.nn.Sequential(torchvision.transforms.v2.Grayscale(num_output_channels=3))\n\n            # 
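GrayscaleToRGB above is presumably needed because ImageNet-pretrained backbones expect\n            # 3-channel input, while the spectrogram features are single-channel.\n            # 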
model.to_yaml(os.path.join(experiment_folder, 'model.yaml'))\n criterion = torch.nn.CrossEntropyLoss()\n elif args.approach == 'ast':\n model = ASTModel(\n num_hidden_layers=args.num_hidden_layers\n )\n db_class = WavDataset\n criterion = torch.nn.CrossEntropyLoss()\n feature_extractor = ASTFeatureExtractor()\n\n elif args.approach == 'sincnet':\n with open('sincnet.yaml', 'r') as fp:\n options = yaml.load(fp, Loader=yaml.Loader)\n\n feature_config = options['windowing']\n wlen = int(feature_config['fs'] * feature_config['cw_len'] / 1000.00)\n wshift = int(feature_config['fs'] *\n feature_config['cw_shift'] / 1000.00)\n\n cnn_config = options['cnn']\n cnn_config['input_dim'] = wlen\n cnn_config['fs'] = feature_config['fs']\n cnn = SincNet(cnn_config)\n\n mlp_1_config = options['dnn']\n mlp_1_config['input_dim'] = cnn.out_dim\n mlp_1 = MLP(mlp_1_config)\n\n mlp_2_config = options['class']\n mlp_2_config['input_dim'] = mlp_1_config['fc_lay'][-1]\n mlp_2 = MLP(mlp_2_config)\n model = Model(\n cnn,\n mlp_1,\n mlp_2,\n wlen,\n wshift\n )\n x = torch.rand(2, wlen)\n model.train()\n x = torch.rand(1, 44100)\n model.eval()\n # print(\"EVAL TEST:\")\n # # print(model(x).shape)\n # print()\n with open(os.path.join(experiment_folder, 'sincnet.yaml'), 'w') as fp:\n yaml.dump(options, fp)\n db_class = WavDataset\n df_train = fix_index(df_train, args.data_root)\n df_dev = fix_index(df_dev, args.data_root)\n df_test = fix_index(df_test, args.data_root)\n db_args['transform'] = audtorch.transforms.RandomCrop(wlen)\n criterion = torch.nn.NLLLoss()\n\n train_dataset = db_class(\n df_train,\n **db_args\n )\n dev_dataset = db_class(\n df_dev,\n **db_args\n )\n\n test_dataset = db_class(\n df_test,\n **db_args\n )\n elif args.dataset == \"CIFAR10\":\n # Parameters for development set\n devel_percentage = 0.2\n if args.approach in [\"cnn10\", \"cnn14\"]:\n transform = torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n torchvision.transforms.Resize((64,64))\n #torchvision.transforms.Resize((1001,64))\n ])\n else:\n transform = torchvision.transforms.Compose(\n [torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n \n train_dev_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n \n generator1 = torch.Generator().manual_seed(42)\n train_dataset, dev_dataset = torch.utils.data.random_split(train_dev_dataset, [1 - devel_percentage, devel_percentage], generator=generator1)\n \n \n test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n\n df_dev = get_df_from_dataset(dev_dataset)\n df_test = get_df_from_dataset(test_dataset)\n \n encoder = LabelEncoder(\n list(test_dataset.class_to_idx.keys()))\n \n \n df_dev['label'] = df_dev['label'].apply(\n encoder.decode)\n df_test['label'] = df_test['label'].apply(\n encoder.decode)\n \n\n n_classes = len(test_dataset.class_to_idx.keys())\n input_channels = 3\n\n # So far only CNN14 and CNN10 are available\n if args.approach == 'cnn14':\n model = Cnn14(\n output_dim=n_classes,\n in_channels=input_channels\n )\n print(\"Pretrained: \", args.pretrained)\n if args.pretrained:\n # model_old = copy.deepcopy(model)\n # sd = model_old.state_dict()\n # for params in sd:\n # print(params, sd[params]) \n # TODO: not implemented\n print(\"Not implemented...\")\n # checkpoint = torch.load(args.pretrained_dir + 
\"Cnn14_16k_mAP=0.438.pth\", map_location=torch.device(args.device))\n # state_dict = checkpoint['model']\n # model.load_state_dict(state_dict, strict=False)\n db_class = CachedDataset\n model.to_yaml(os.path.join(experiment_folder, 'model.yaml'))\n criterion = torch.nn.CrossEntropyLoss()\n \n elif args.approach == 'cnn10':\n model = Cnn10(\n output_dim=n_classes,\n in_channels=input_channels\n )\n print(\"Pretrained: \", args.pretrained)\n if args.pretrained:\n # model_old = copy.deepcopy(model)\n # sd = model_old.state_dict()\n # for params in sd:\n # print(params, sd[params]) \n # TODO: not implemented\n print(\"Not implemented...\")\n # checkpoint = torch.load(args.pretrained_dir + \"Cnn10_mAP=0.380.pth\", map_location=torch.device(args.device))\n # state_dict = checkpoint['model']\n # model.load_state_dict(state_dict, strict=False)\n # sd2 = model.state_dict()\n # for params in sd2:\n # print(params, sd2[params]) \n db_class = CachedDataset\n model.to_yaml(os.path.join(experiment_folder, 'model.yaml'))\n criterion = torch.nn.CrossEntropyLoss()\n elif args.approach == 'resnet50':\n model = create_ResNet50_model(n_classes, pretrained=args.pretrained)\n elif args.approach.startswith(\"efficientnet\"):\n model = ModifiedEfficientNet(n_classes, scaling_type=args.approach, pretrained=args.pretrained)\n \n \n # Print a summary using torchinfo (uncomment for actual output)\n criterion = torch.nn.CrossEntropyLoss() \n print(args.approach)\n if args.state is not None:\n initial_state = torch.load(args.state)\n model.load_state_dict(\n initial_state,\n strict=False\n )\n \n x, y = train_dataset[0]\n x = np.expand_dims(x, axis=0)\n print(x.shape)\n summary(model=model, \n input_size=(x.shape), # make sure this is \"input_size\", not \"input_shape\"\n # col_names=[\"input_size\"], # uncomment for smaller output\n col_names=[\"input_size\", \"output_size\", \"num_params\", \"trainable\"],\n col_width=20,\n row_settings=[\"var_names\"]\n )\n # print(\"-\" * 50)\n # personalized_plot_model(model)\n\n if args.approach == 'sincnet':\n db_args.pop('transform')\n \n # create DataLoaders\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n shuffle=True,\n batch_size=args.batch_size,\n num_workers=4,\n generator=gen_seed\n )\n\n dev_loader = torch.utils.data.DataLoader(\n dev_dataset,\n shuffle=False,\n batch_size=1 if args.approach == 'sincnet' else args.batch_size,\n num_workers=4,\n generator=gen_seed\n )\n \n\n # df_dev = pd.DataFrame(dev_dataset.dataset)\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=1 if args.approach == 'sincnet' else args.batch_size,\n num_workers=4,\n generator=gen_seed\n )\n\n accuracy_history = []\n uar_history = []\n f1_history = []\n train_loss_history = []\n valid_loss_history = []\n\n if not os.path.exists(os.path.join(experiment_folder, 'state.pth.tar')):\n\n encoder.to_yaml(os.path.join(experiment_folder, 'encoder.yaml'))\n with open(os.path.join(experiment_folder, 'hparams.yaml'), 'w') as fp:\n yaml.dump(vars(args), fp)\n\n writer = SummaryWriter(log_dir=os.path.join(experiment_folder, 'log'))\n\n torch.save(\n model.state_dict(),\n os.path.join(\n experiment_folder,\n 'initial.pth.tar')\n )\n if isinstance(args.optimizer, str):\n optim = args.optimizer\n if args.optimizer == 'SGD':\n optimizer = torch.optim.SGD(\n model.parameters(),\n momentum=0.9,\n lr=args.learning_rate\n )\n elif args.optimizer == 'Adam':\n optimizer = torch.optim.Adam(\n model.parameters(),\n lr=args.learning_rate\n )\n elif args.optimizer == 
'RMSprop':\n optimizer = torch.optim.RMSprop(\n model.parameters(),\n lr=args.learning_rate,\n alpha=.95,\n eps=1e-7\n )\n else:\n optimizer = args.optimizer.create(\n model, lr=args.learning_rate, device=args.device)\n\n if not \"sheduler_wrapper\" in args or args.sheduler_wrapper == None:\n sheduler = None\n elif isinstance(args.sheduler_wrapper, list):\n sheduler = []\n for sh in args.sheduler_wrapper:\n sheduler.append(sh.create(optimizer))\n else:\n sheduler = args.sheduler_wrapper.create(optimizer)\n\n \n\n max_metric = -1\n best_epoch = 0\n best_state = None\n best_results = None\n\n # accuracy_history = []\n # uar_history = []\n # f1_history = []\n # train_loss_history = []\n # valid_loss_history = []\n\n for epoch in range(epochs):\n model.to(device)\n model.train()\n epoch_folder = os.path.join(\n experiment_folder,\n f'Epoch_{epoch+1}'\n )\n os.makedirs(epoch_folder, exist_ok=True)\n\n if \"train_timer\" in args:\n args.train_timer.start()\n _loss_history = []\n for index, (features, targets) in tqdm.tqdm(\n enumerate(train_loader),\n desc=f'Epoch {epoch}',\n total=len(train_loader),\n disable=args.disable_progress_bar\n ):\n\n if (features != features).sum():\n raise ValueError(features)\n\n if isinstance(optimizer, ModuleWrapper):\n loss = train_step_gdtuo(\n model, optimizer, criterion, features, targets, device)\n elif isinstance(optimizer, (KFACOptimizer, EKFACOptimizer)):\n loss = train_step_kfac(\n model, optimizer, criterion, features, targets, device, epoch+1, index+1)\n elif isinstance(optimizer, SAM):\n loss = train_step_SAM(\n model, optimizer, criterion, features, targets, device)\n else:\n loss = train_step_normal(\n model, optimizer, criterion, features, targets, device)\n if index % 50 == 0:\n writer.add_scalar(\n 'Loss',\n loss,\n global_step=epoch * len(train_loader) + index\n )\n _loss_history.append(loss)\n \n train_loss = sum(_loss_history)/len(_loss_history)\n # print(train_loss)\n if \"train_timer\" in args:\n args.train_timer.stop()\n\n if \"valid_timer\" in args:\n args.valid_timer.start()\n ## Sharpness\n\n # sharpness_values = calculate_sharpness(model, device, train_loader, transfer_features, args.disable_progress_bar, criterion)\n # print(sharpness_values)\n \n\n\n\n # dev set evaluation\n results, _, predictions, outputs, valid_loss = evaluate_categorical(\n model,\n device,\n dev_loader,\n transfer_features,\n args.disable_progress_bar,\n criterion\n )\n results_df = pd.DataFrame(\n index=df_dev.index,\n data=predictions,\n columns=['predictions']\n )\n results_df['predictions'] = results_df['predictions'].apply(\n encoder.decode)\n results_df.reset_index().to_csv(os.path.join(epoch_folder, 'dev.csv'), index=False)\n np.save(os.path.join(epoch_folder, 'outputs.npy'), outputs)\n \n # print(results_df)\n if args.dataset == \"DCASE2020\":\n task = 'scene_label'\n stratify = ['scene_category', 'city', 'device']\n else:\n task = \"label\"\n stratify = []\n logging_results = disaggregated_evaluation(\n results_df,\n df_dev,\n task,\n stratify,\n 'categorical'\n )\n\n with open(os.path.join(epoch_folder, 'dev.yaml'), 'w') as fp:\n yaml.dump(logging_results, fp)\n for metric in logging_results.keys():\n writer.add_scalars(\n f'dev/{metric}',\n logging_results[metric],\n (epoch + 1) * len(train_loader)\n )\n\n torch.save(model.cpu().state_dict(), os.path.join(\n epoch_folder, 'state.pth.tar'))\n results[\"train_loss\"] = train_loss\n results[\"val_loss\"] = valid_loss\n \n print(f'Dev results at epoch {epoch+1}:\\n{yaml.dump(results)}')\n # save accuracy 
metric\n            accuracy_history.append(results[\"ACC\"])\n            uar_history.append(results[\"UAR\"])\n            f1_history.append(results[\"F1\"])\n            train_loss_history.append(train_loss)\n            valid_loss_history.append(valid_loss)\n            if results['ACC'] > max_metric:\n                max_metric = results['ACC']\n                best_epoch = epoch\n                best_state = model.cpu().state_dict()\n                best_results = results.copy()\n\n            # plateau_scheduler.step(results['ACC'])\n            if \"valid_timer\" in args:\n                args.valid_timer.stop()\n        train_results, _, _, _, train_loss = evaluate_categorical(\n            model,\n            device,\n            train_loader,\n            transfer_features,\n            args.disable_progress_bar,\n            criterion\n        )\n        print(f'Final Train results:\\n {yaml.dump(train_results)}')\n        print(f'Final Train loss:\\n {yaml.dump(train_loss)}')\n        #results[\"sharpness_value\"] = sharpness_values\n        print(\"Final Train results: \", train_results)\n        print(\"Final Train loss: \", train_loss)\n\n        # sharpness_values = calculate_sharpness(model, device, train_loader, transfer_features, args.disable_progress_bar, criterion)\n        # print(f'Sharpness:\\n{yaml.dump(results)}')\n        #results[\"sharpness_value\"] = sharpness_values\n        # print(\"Sharpness Value: \", sharpness_values)\n        print(\n            f'Best dev results found at epoch {best_epoch+1}:\\n{yaml.dump(best_results)}')\n        best_results['Epoch'] = best_epoch + 1\n        with open(os.path.join(experiment_folder, 'dev.yaml'), 'w') as fp:\n            yaml.dump(best_results, fp)\n        writer.close()\n    else:\n        best_state = torch.load(os.path.join(\n            experiment_folder, 'state.pth.tar'))\n        print('Training already run')\n        epoch_folder = os.path.join(\n            experiment_folder,\n            f'Epoch_{epochs}'\n        )\n    # if args.dataset == \"DCASE2020\":\n    \n    print(\"saving to: \", os.path.join(experiment_folder, 'test_holistic.yaml'))\n    if not os.path.exists(os.path.join(experiment_folder, 'test_holistic.yaml')):\n        # if True:\n        model.load_state_dict(best_state)\n        test_results, targets, predictions, outputs, valid_loss = evaluate_categorical(\n            model, device, test_loader, transfer_features, args.disable_progress_bar, criterion)\n        print(f'Best test results:\\n{yaml.dump(test_results)}')\n        torch.save(best_state, os.path.join(\n            experiment_folder, 'state.pth.tar'))\n        np.save(os.path.join(experiment_folder, 'targets.npy'), targets)\n        np.save(os.path.join(experiment_folder, 'outputs.npy'), outputs)\n        np.save(os.path.join(experiment_folder, 'predictions.npy'), predictions)\n        results_df = pd.DataFrame(\n            index=df_test.index,\n            data=predictions,\n            columns=['predictions']\n        )\n        results_df['predictions'] = results_df['predictions'].apply(\n            encoder.decode)\n        results_df.reset_index().to_csv(os.path.join(epoch_folder, 'test.csv'), index=False)\n        # print(results_df)\n        # print(df_test)\n        with open(os.path.join(experiment_folder, 'test.yaml'), 'w') as fp:\n            yaml.dump(test_results, fp)\n        if args.dataset == \"DCASE2020\":\n            task = 'scene_label'\n            stratify = ['scene_category', 'city', 'device']\n        else:\n            task = \"label\"\n            stratify = []\n        logging_results = disaggregated_evaluation(\n            results_df,\n            df_test,\n            task,\n            stratify,\n            'categorical'\n        )\n        with open(os.path.join(experiment_folder, 'test_holistic.yaml'), 'w') as fp:\n            yaml.dump(logging_results, fp)\n    else:\n        print('Evaluation already run')\n    # in case we don't have any training the benchrunner doesn't make a lot of sense.\n\n    return accuracy_history, uar_history, f1_history, train_loss_history, valid_loss_history\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('DCASE-T1 Training')\n    parser.add_argument(\n        '--data-root',\n        help='Path where the data has been extracted',\n        
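# e.g. --data-root /data/TAU-urban-acoustic-scenes-2020-mobile (illustrative path)\n        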
required=True\n    )\n    parser.add_argument(\n        '--results-root',\n        help='Path where results are to be stored',\n        required=True\n    )\n    parser.add_argument(\n        '--features',\n        help='Path to features',\n        required=True\n    )\n    \n    parser.add_argument(\n        '--device',\n        help='CUDA-enabled device to use for training',\n        required=True\n    )\n    parser.add_argument(\n        '--state',\n        help='Optional initial state'\n    )\n\n    parser.add_argument(\n        '--approach',\n        default='cnn10',\n        help='Model to train: cnn14, cnn10, resnet50, efficientnet* (matched by prefix), sincnet or ast'\n    )\n    parser.add_argument(\n        '--category',\n        default=None,\n        choices=[\n            'indoor',\n            'outdoor',\n            'transportation',\n            None\n        ]\n    )\n    parser.add_argument(\n        '--batch-size',\n        type=int,\n        default=32,\n        help='Batch size'\n    )\n    parser.add_argument(\n        '--epochs',\n        type=int,\n        default=60\n    )\n    parser.add_argument(\n        '--learning-rate',\n        type=float,\n        default=0.001\n    )\n    parser.add_argument(\n        '--seed',\n        type=int,\n        default=0\n    )\n    parser.add_argument(\n        '--optimizer',\n        default='SGD'\n    )\n    parser.add_argument(\n        '--feature_dir',\n        default=''\n    )\n\n    parser.add_argument(\n        '--custom-feature-path',\n        help='Custom .npy location of features',\n        required=False\n    )\n    parser.add_argument(\n        '--num-hidden-layers',\n        type=int,\n        default=12,\n        help=\"Num hidden layers to use for AST (max 12)\"\n    )\n\n    parser.add_argument(\n        '--disable-progress-bar',\n        action='store_true',\n        help='Disable tqdm progress bar while training'\n    )\n\n    parser.add_argument(\n        '--exclude-cities',\n        default=\"None\",\n        type=str,\n        help='Exclude a city from training',\n        choices=[\n            \"barcelona\",\n            \"helsinki\",\n            \"lisbon\",\n            \"london\",\n            \"lyon\",\n            \"milan\",\n            \"paris\",\n            \"prague\",\n            \"stockholm\",\n            \"vienna\",\n            \"None\",\n        ]\n    )\n\n    args = parser.parse_args()\n    run_training(args)\n","repo_name":"EIHW/ASC_Sharpness","sub_path":"training/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":34749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29029450377","text":"import pytest\n\nfrom intranet.femida.src.candidates.helpers import close_candidate\nfrom intranet.femida.src.candidates.considerations.helpers import archive_consideration\nfrom intranet.femida.src.candidates.choices import (\n    SUBMISSION_SOURCES,\n    SUBMISSION_STATUSES,\n    CONSIDERATION_RESOLUTIONS,\n    CONSIDERATION_STATUSES,\n    CANDIDATE_STATUSES,\n)\nfrom intranet.femida.src.candidates.models import CandidateSubmission\nfrom intranet.femida.src.candidates.submissions.helpers import filter_submissions_by_recruiter\nfrom intranet.femida.src.offers.choices import SOURCES\n\nfrom intranet.femida.tests import factories as f\n\n\npytestmark = pytest.mark.django_db\n\n\ndef _filter_submissions_by_recruiter(recruiter):\n    return filter_submissions_by_recruiter(CandidateSubmission.unsafe.all(), recruiter)\n\n\ndef test_filter_submissions_by_recruiter_new_form():\n    \"\"\"\n    New candidate-form submissions for this recruiter's vacancies\n    \"\"\"\n    recruiter = f.create_recruiter()\n    vacancy = f.VacancyFactory()\n    vacancy.set_main_recruiter(recruiter)\n    submission_form = f.SubmissionFormFactory()\n    submission_form.vacancies.add(vacancy)\n    f.SubmissionFactory.create_batch(3) # dummies\n    expected = f.SubmissionFactory(\n        source=SUBMISSION_SOURCES.form,\n        status=SUBMISSION_STATUSES.new,\n        form=submission_form,\n    )\n    result = _filter_submissions_by_recruiter(recruiter)\n    assert [expected] == list(result)\n\n\n
def test_filter_submissions_by_recruiter_new_reference():\n    \"\"\"\n    New referral submissions for this recruiter's vacancies\n    \"\"\"\n    recruiter = f.create_recruiter()\n    vacancy = f.VacancyFactory()\n    vacancy.set_main_recruiter(recruiter)\n    reference = f.ReferenceFactory()\n    reference.vacancies.add(vacancy)\n    f.SubmissionFactory.create_batch(3) # dummies\n    expected = f.SubmissionFactory(\n        source=SUBMISSION_SOURCES.reference,\n        status=SUBMISSION_STATUSES.new,\n        reference=reference,\n    )\n    result = _filter_submissions_by_recruiter(recruiter)\n    assert [expected] == list(result)\n\n\ndef test_filter_submissions_by_recruiter_closed():\n    \"\"\"\n    Submissions processed by this recruiter\n    \"\"\"\n    recruiter = f.create_recruiter()\n    f.SubmissionFactory.create_batch(3) # dummies\n    expected = f.SubmissionFactory(\n        status=SUBMISSION_STATUSES.closed,\n        responsible=recruiter,\n    )\n    result = _filter_submissions_by_recruiter(recruiter)\n    assert [expected] == list(result)\n\n\ndef test_filter_submissions_by_recruiter_processed_reference():\n    \"\"\"\n    Submissions whose referral was processed by this recruiter\n    \"\"\"\n    recruiter = f.create_recruiter()\n    reference = f.ReferenceFactory(processed_by=recruiter)\n    f.SubmissionFactory.create_batch(3) # dummies\n    expected = f.SubmissionFactory(reference=reference)\n    result = _filter_submissions_by_recruiter(recruiter)\n    assert [expected] == list(result)\n\n\ndef test_archive_consideration():\n    candidate = f.create_candidate_with_consideration()\n    consideration = candidate.considerations.last()\n    assert consideration.state == CONSIDERATION_STATUSES.in_progress\n    assert consideration.extended_status == CONSIDERATION_STATUSES.in_progress\n    assert consideration.resolution == ''\n    assert list(consideration.responsibles.all()) == []\n\n    archive_consideration(\n        consideration=consideration,\n        resolution=CONSIDERATION_RESOLUTIONS.rejected_by_resume,\n    )\n    assert consideration.state == CONSIDERATION_STATUSES.archived\n    assert consideration.extended_status == CONSIDERATION_STATUSES.archived\n    assert consideration.resolution == CONSIDERATION_RESOLUTIONS.rejected_by_resume\n    assert set(consideration.responsibles.all()) == set(candidate.responsibles.all())\n    candidate_responsibles = dict(candidate.candidate_responsibles.values_list('user', 'role'))\n    consideration_responsibles = dict(\n        consideration.consideration_responsibles.values_list('user', 'role'),\n    )\n    assert candidate_responsibles == consideration_responsibles\n\n\ndef test_close_candidate():\n    candidate = f.create_candidate_with_consideration(source_description='test')\n    assert candidate.status == CANDIDATE_STATUSES.in_progress\n    assert candidate.source == SOURCES.other\n    assert candidate.source_description == 'test'\n    assert candidate.responsibles.count() == 2\n\n    close_candidate(candidate)\n    assert candidate.status == CANDIDATE_STATUSES.closed\n    assert candidate.source == ''\n    assert candidate.source_description == ''\n    assert candidate.responsibles.count() == 0\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/unit/submissions/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29181090978","text":"from __future__ import division, print_function\nimport os\nimport h5py\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom itertools import islice\nsns.set_style('ticks')\n\n\nclass 
RubinovAlgorithm:\n def __init__(self, hic_data, chr_number, n_iterations=1000):\n self.hic_file = hic_data\n self.chromosome_number = chr_number\n self.n_iterations = n_iterations\n # self.start = start\n # self.end = end\n self.get_hic_matrix()\n self.get_tad_orientation()\n self.get_first_level_tads()\n self.get_tads()\n\n\n def get_hic_matrix(self):\n print('Loading data...')\n hic_file = h5py.File(self.hic_file, 'r')\n chromosome = str(self.chromosome_number) + ' ' + str(self.chromosome_number)\n hic_matrix = hic_file[chromosome].value\n df = pd.DataFrame(hic_matrix)\n df = df.loc[:, (df != 0).any(axis=0)]\n df = df.loc[(df != 0).any(axis=1), :]\n hic_matrix = np.array(df)\n np.fill_diagonal(hic_matrix[1:, :], np.zeros(len(hic_matrix) - 1))\n np.fill_diagonal(hic_matrix[:, 1:], np.zeros(len(hic_matrix) - 1))\n self.hic_for_vizualization = hic_matrix\n self.hic_matrix = np.power(np.array(hic_matrix), 4)\n return self.hic_matrix\n\n def get_tad_orientation(self):\n left_summs = np.sum(np.triu(self.hic_matrix), axis=0)\n right_summs = np.sum(np.triu(self.hic_matrix), axis=1)\n self.pairs = list(zip(left_summs, right_summs))\n\n orientation = []\n for pair in self.pairs:\n if pair[0] < pair[1]:\n orientation.append(1)\n elif pair[0] > pair[1]:\n orientation.append(-1)\n self.orientation = orientation\n\n def get_first_level_tads(self):\n tads_all = []\n tads_left_pos = []\n tads_length = []\n borders = []\n new_pairs = []\n\n first_tads = []\n first_tads_left_pos = []\n first_tads_length = []\n brdr = []\n resupd = []\n numbers = iter(range(len(self.orientation)-1))\n for i in numbers:\n if self.orientation[i] == 1 and self.orientation[i + 1] == -1:\n brdr.append([i, i + 2])\n if self.pairs[i][0] + self.pairs[i + 1][0] > self.pairs[i][1] + self.pairs[i + 1][1]:\n first_tads.append(-1)\n elif self.pairs[i][0] + self.pairs[i + 1][0] < self.pairs[i][1] + self.pairs[i + 1][1]:\n first_tads.append(1)\n\n nres = [self.pairs[i][0] + self.pairs[i + 1][0], self.pairs[i][1] + self.pairs[i + 1][1]]\n resupd.append(nres)\n first_tads_left_pos.append(i)\n first_tads_length.append(2)\n next(islice(numbers, 1, 1), None)\n\n else:\n first_tads.append(self.orientation[i])\n first_tads_left_pos.append(i)\n first_tads_length.append(1)\n resupd.append(self.pairs[i])\n\n new_pairs.append(resupd)\n self.new_pairs = new_pairs\n\n tads_all.append(self.orientation)\n tads_all.append(first_tads)\n self.tads_all = tads_all\n\n borders.append(brdr)\n self.borders = borders\n\n tads_left_pos.append(first_tads_left_pos)\n self.tads_left_pos = tads_left_pos\n\n tads_length.append(first_tads_length)\n self.tads_length = tads_length\n\n def get_tads(self):\n print('Getting TADs...')\n for level in range(self.n_iterations):\n tads = []\n tads_left = []\n tads_len = []\n border = []\n updated_pairs = []\n numbers = iter(range(len(self.tads_all[level + 1])-1))\n\n if len(self.borders[level]) == 1 and self.borders[level][0][0] == 0 and\\\n self.borders[level][0][1] == len(self.hic_matrix):\n max_level = level\n print('Highest level', max_level)\n break\n\n for i in numbers:\n if self.tads_all[level + 1][i] == 1 and self.tads_all[level + 1][i + 1] == -1:\n border.append([self.tads_left_pos[level][i], self.tads_left_pos[level][i] +\n self.tads_length[level][i] + self.tads_length[level][i + 1]])\n\n if self.new_pairs[level][i][0] + self.new_pairs[level][i + 1][0] > \\\n self.new_pairs[level][i][1] + self.new_pairs[level][i + 1][1]:\n tads.append(-1)\n elif self.new_pairs[level][i][0] + self.new_pairs[level][i + 1][0] < \\\n 
self.new_pairs[level][i][1] + self.new_pairs[level][i + 1][1]:\n                    tads.append(1)\n\n                    n_pairs = [self.new_pairs[level][i][0] + self.new_pairs[level][i + 1][0],\n                               self.new_pairs[level][i][1] + self.new_pairs[level][i + 1][1]]\n\n                    updated_pairs.append(n_pairs)\n                    tads_left.append(self.tads_left_pos[level][i])\n                    tads_len.append(self.tads_length[level][i] + self.tads_length[level][i + 1])\n                    next(islice(numbers, 1, 1), None)\n\n                else:\n                    tads.append(self.tads_all[level + 1][i])\n                    tads_left.append(self.tads_left_pos[level][i])\n                    tads_len.append(self.tads_length[level][i])\n                    updated_pairs.append(self.new_pairs[level][i])\n\n            self.new_pairs.append(updated_pairs)\n            self.tads_all.append(tads)\n            self.borders.append(border)\n            self.tads_left_pos.append(tads_left)\n            self.tads_length.append(tads_len)\n        print('Done')\n\n    def mtxplot(self, letter, color):\n        for j in range(len(letter)):\n            bgn = letter[j][0] - self.startmtx\n            end = letter[j][1] - self.startmtx\n            plt.plot([bgn + 2, end + 2], [bgn, bgn], color=color)\n            plt.plot([end + 2, end + 2], [bgn, end], color=color)\n\n    def visualise_tree(self, start=None, end=None):\n        print('Building hierarchical tree...')\n        self.startmtx = start + 1\n        self.endmtx = end\n        plt.figure(figsize=(20, 20))\n        sns.heatmap(self.hic_for_vizualization[start:end, start:end], cmap='Reds')\n        for i in range(self.startmtx, self.endmtx):\n            val = self.orientation[i]\n            plt.text(i - self.startmtx, i + 0.8 - self.startmtx, val, {'color': 'red' if val > 0 else 'blue'})\n        if len(self.borders) % 3 == 0:\n            colors = ['red', 'blue', 'green'] * int(len(self.borders) / 3)\n        else:\n            colors = ['red', 'blue', 'green'] * int(len(self.borders) / 3)\n            colors.extend(['orange', 'blue', 'green'][: len(self.borders) % 3])\n        for letter, color in zip(self.borders, colors):\n            self.mtxplot(letter, color)\n        plt.savefig(str(os.path.splitext(self.hic_file)[0]) + '_hierarhical_tree' + '.pdf', format='pdf')\n        print('Done')\n\n","repo_name":"OPushkareva/Clustering-and-Comparison-of-Hierarchies-in-the-Spatial-Organization-of-Chromatin","sub_path":"RubinovAlgorithm.py","file_name":"RubinovAlgorithm.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5509930336","text":"from time import sleep\nimport schedule\nfrom datetime import datetime, time\nfrom pytz import timezone\nfrom config import SCHEDULER_RUN_TIMES\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nSCHEDULER_TIMEZONE: str = os.getenv(\"SCHEDULER_TIMEZONE\")\n\n\nclass Scheduler:\n\n    def __init__(self, bot):\n        self.bot = bot\n        self.timezone = timezone(SCHEDULER_TIMEZONE)\n        self._schedule_test_messaging()\n\n    def _job(self, text: str) -> None:\n        msg = f'{datetime.now(tz=self.timezone).strftime(\"%H:%M:%S\")}\\nScheduled message:\\n{text}'\n        self.bot.send_admin_message(text=msg)\n        # return schedule.CancelJob\n\n    def _schedule_message_daily(self, text: str, run_time: str) -> None:\n        time_obj = time.fromisoformat(run_time)\n        schedule.every().day.at(time_obj.strftime(\"%H:%M:%S\"), tz=self.timezone).do(self._job, text=text)\n\n    def _schedule_test_messaging(self) -> None:\n        t1, t2, t3 = SCHEDULER_RUN_TIMES\n        self._schedule_message_daily(text='Good morning! The bot is online', run_time=t1)\n        self._schedule_message_daily(text='Lunch time! Bon appetit!', run_time=t2)\n        self._schedule_message_daily(text='The workday is over. Have a good evening!', run_time=t3)
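\n        # NB: passing tz= to at() assumes a schedule version with timezone support (>= 1.2)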
\n\n    def start(self):\n        while True:\n            schedule.run_pending()\n            sleep(1)\n","repo_name":"cOnT3ST/-BetBot","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"1528563478","text":"import os\nimport json\nfrom controllers.helpers.dropbox import dbx\nfrom dropbox.exceptions import ApiError\n\n# Initialize elasticsearch\nfrom elasticsearch import Elasticsearch\nes = Elasticsearch(\n\tcloud_id=os.getenv('ELASTICSEARCH_CLOUD_ID'),\n    basic_auth=(os.getenv('ELASTICSEARCH_USERNAME'), os.getenv('ELASTICSEARCH_PASSWORD')),\n)\n\n\n\nif not es.indices.exists(index=\"dropbox_files\"):\n\tes.indices.create(index=\"dropbox_files\")\n\n# Initialize tika for file parsing\nimport tika\ntika.initVM()\nfrom tika import parser\n\n# env vars\nfrom dotenv import load_dotenv\nload_dotenv()\n\n# Initialize Flask app\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\napp = Flask(__name__)\nCORS(app)\n\n\n# Retrieve already stored data from the elasticsearch index\ndef get_old_data():\n\tdb = {}\n\tresponse = es.search(index=\"dropbox_files\", body={\"query\": {\"match_all\": {}}})\n\n\tfor x in response['hits']['hits']:\n\t\tdb[x['_id']] = x['_source']\n\treturn db\n\n# Get a list of metadata of all files in dropbox account\ndef get_new_metadata():\n\tnew_metadata = {}\n\tresult = dbx.files_list_folder(path=\"\", recursive=True)\n\tfor file in result.entries:\n\t\tif hasattr(file, 'content_hash'):\n\t\t\t# Key for the elasticsearch index is the file id plus its content hash\n\t\t\tnew_metadata[file.id+\";\"+file.content_hash] = file.path_display\n\n\treturn new_metadata\n\n# Delete the files deleted from dropbox account, from the local db copy\ndef delete_old_data(db, to_delete):\n\tfor key in to_delete:\n\t\tdel db[key]\n\n# Download newly added/updated files from dropbox account & save it to local db copy\ndef save_new_files(db, to_download, new_metadata):\n\tfor key in to_download:\n\t\tmetadata, result = dbx.files_download(path=new_metadata[key])\n\t\tfile_contents = result.content\n\t\tparsed = parser.from_buffer(file_contents)\n\t\tdb[key] = [parsed['content'], metadata.path_display]\n \n# Update es index using local db copy\ndef update_search_index(db, to_download, to_delete):\n\tbulk_requests = []\n\tfor file_id in to_download:\n\t\tdoc = {\n\t\t\t'content': db[file_id][0],\n\t\t\t'path': db[file_id][1],\n\t\t}\n\t\taction= {\"index\": {\"_index\": \"dropbox_files\", \"_id\": file_id}}\n\t\tbulk_requests.append(action)\n\t\tbulk_requests.append(doc)\n\n\tfor file_id in to_delete:\n\t\tbulk_requests.append({\"delete\": {\"_index\": \"dropbox_files\", \"_id\": file_id}})\n\n\tif len(bulk_requests):\n\t\tes.bulk(operations=bulk_requests)\n\n# Fetch shareable links for all files & return the final result \ndef get_search_result(response):\n\tresult_paths = []\n\tfor x in response['hits']['hits']:\n\t\tresult_paths.append(x['_source']['path'])\n\n\tfinal_response = []\n\n\t# Fetch metadata containing links for each file from dropbox\n\tfor path in result_paths:\n\n\t\tresult = dbx.sharing_list_shared_links(path=path, direct_only=True)\n\t\tif len(result.links):\n\t\t\ttemp = result.links[0]\n\t\t\tfinal_response.append({\"link\": temp.url, \"name\": temp.name, \"path\": temp.path_lower})\n\t\n\treturn final_response\n\n\n@app.route(\"/search\", methods=['GET'])\ndef search():\n\n\t\ttry:\n\t\t\tq = request.args.get('q')\n\t\t\tif not 
q:\n\t\t\t\treturn jsonify({\"error\": \"no query\"}), 400\n\n\t\t\t# Get old data from elastic search index & store it in local copy\n\t\t\tdb = get_old_data();\n\n\t\t\t# Get new metadata of all files from dropbox account\n\t\t\tnew_metadata = get_new_metadata()\n\n\t\t\t# Get a list of keys in stored index and new_metadata from dropbox\n\t\t\told_keys = set(list(db.keys()))\n\t\t\tnew_keys = set(list(new_metadata.keys()))\n\n\t\t\tto_download = new_keys - old_keys\n\t\t\tto_delete = old_keys - new_keys\n\n\t\t\t# Delete keys for files that have been deleted from dropbox, from the local copy\n\t\t\tdelete_old_data(db, to_delete)\n\n\t\t\t# Download newly added files on dropbox & save it to local copy\n\t\t\tsave_new_files(db, to_download, new_metadata)\n\t\t\t\n\t\t\t# Update elastic search index using updated local copy\n\t\t\tupdate_search_index(db, to_download, to_delete)\n\n\n\t\t\t# Search files in elastic search index for given query q\n\t\t\tes.indices.refresh(index=\"dropbox_files\")\n\t\t\tresponse = es.search(index=\"dropbox_files\", body={\"query\": {\"match_phrase\": {\"content\": q}}})\n\n\t\t\t# Fetch shareable links with given file paths\n\t\t\tfinal_response = get_search_result(response)\n\n\t\texcept:\n\t\t\treturn jsonify({\"error\": \"internal server error\"}), 500\n\n # Output/send response to client\n\t\treturn jsonify(final_response), 200","repo_name":"neerizz/search-master-backend","sub_path":"controllers/dropbox_search_controller.py","file_name":"dropbox_search_controller.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73749827282","text":"from py_stealth import *\nfrom datetime import datetime as dt\nimport re\n\nRULES = {\n \"Forensics\": 0x400FF777,\n \"Cooking\": 0x400FEF2B,\n \"Inscription\": 0x400FEE9F,\n \"Mining\": 0x400FEE8E,\n \"Blacksmithing\": 0x400FEEDF,\n \"Tactics\": 0x400FEF71,\n \"Veterinary\": 0x400FEF87,\n \"Alchemy\": 0x400FEEBE,\n \"MagicResistance\": 0x400FEF07,\n \"Anatomy\": 0x400FEF51,\n \"Taming\": 0x400FEEEE,\n \"Lumberjacking\": 0x400FEF75,\n \"DetectingHidden\": 0x400FEEF8,\n \"Archery\": 0x400FEF0D,\n \"Tinkering\": 0x400FEFA1,\n \"Carpentry\": 0x400FF0CC,\n \"Tailoring\": 0x400FEFB1,\n \"ArmsLore\": 0x400FF0DB,\n \"AnimalLore\": 0x400FF08A,\n \"Stealth\": 0x400FF0F2,\n \"Fishing\": 0x400FEF93,\n \"Fencing\": 0x400FF0A6,\n \"Anatomy\": 0x400FAFF7,\n}\n\nBOOKS_CONTAINER = 0x40212B61\nBAGS_CONTAINER = 0x400FF99A\nBOOK_TYPE = 0x1C27\n\ndef get_skillbook_bonus(serial: int) -> str:\n _started = dt.now()\n if IsObjectExists(serial):\n ClickOnObject(serial)\n Wait(500)\n _journal_line = InJournalBetweenTimes(\"%\", _started, dt.now())\n if _journal_line > 0: \n _match = re.search(r\"You\\ssee:\\s\\d\\.\\d\\%\\s(.+)\",Journal(_journal_line))\n if _match: \n return _match.group(1)\n return ''\n \ndef get_container_for_skillbook(name: str) -> int:\n if name in RULES:\n return RULES[name]\n return 0 \n\ndef process():\n if FindType(BOOK_TYPE, BOOKS_CONTAINER):\n for _book in GetFindedList():\n _name = get_skillbook_bonus(_book)\n _container = get_container_for_skillbook(_name)\n if _container > 0:\n print(f\"Moving {_name}...\")\n MoveItem(_book, 0, _container, 0, 0, 0)\n Wait(1000)\n \n\n###\nif __name__ == \"__main__\":\n for container in [BOOKS_CONTAINER, BAGS_CONTAINER] + list(RULES.values()):\n UseObject(container)\n Wait(200) \n 
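# each container is opened first so its contents are loaded client-side before sorting\n    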
process()\n","repo_name":"it-sova/olmer-stealth-public","sub_path":"sort_by_name.py","file_name":"sort_by_name.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29060476837","text":"import pytest\n\nfrom hamcrest import assert_that, equal_to, match_equality, matches_regexp\n\nfrom billing.yandex_pay_admin.yandex_pay_admin.core.actions.document.create import CreateDocumentAction\nfrom billing.yandex_pay_admin.yandex_pay_admin.file_storage.documents import YandexPayAdminDocsFileStorage\nfrom billing.yandex_pay_admin.yandex_pay_admin.storage.entities.document import Document\n\nPATH_PREFIX = '/prefix'\nPATH_MATCHER = match_equality(matches_regexp(r'/prefix/[0-9a-f\\-]+'))\n\n\n@pytest.mark.asyncio\nasync def test_returned(partner, file_content_iter):\n document = await CreateDocumentAction(\n partner_id=partner.partner_id,\n path_prefix=PATH_PREFIX,\n original_name='file.png',\n content=file_content_iter(),\n ).run()\n\n assert_that(\n document,\n equal_to(\n Document(\n document_id=document.document_id,\n partner_id=partner.partner_id,\n path=PATH_MATCHER,\n digest='',\n name='file.png',\n created=document.created,\n updated=document.updated,\n )\n ),\n )\n\n\n@pytest.mark.asyncio\nasync def test_calls_file_storage(mocker, partner, file_content_iter, mock_storage):\n await CreateDocumentAction(\n partner_id=partner.partner_id,\n path_prefix=PATH_PREFIX,\n original_name='file.png',\n content=file_content_iter(),\n ).run()\n\n mock_storage.assert_exited_once()\n mock_storage.ctx_result.upload_stream.assert_called_once_with(PATH_MATCHER)\n mock_storage.ctx_result.upload_stream.assert_exited_once()\n mock_storage.ctx_result.upload_stream.ctx_result.write.assert_has_awaits(\n [\n mocker.call(b'chunk1'),\n mocker.call(b'chunk2'),\n ]\n )\n\n\n@pytest.fixture\ndef file_content_iter():\n async def _iter():\n yield b'chunk1'\n yield b'chunk2'\n\n return _iter\n\n\n@pytest.fixture(autouse=True)\ndef mock_storage(mocker, actx_mock):\n return mocker.patch.object(\n YandexPayAdminDocsFileStorage,\n 'acquire',\n actx_mock(\n return_value=mocker.Mock(upload_stream=actx_mock(return_value=mocker.Mock(write=mocker.AsyncMock())))\n ),\n )\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/unit/core/actions/document/test_create.py","file_name":"test_create.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4177807453","text":"#! 
/usr/bin/python\n#%% imports\n\nimport json\nimport os\nimport re\nimport struct\nimport pandas\nimport datetime\nimport statistics\nimport matplotlib.pyplot as plt\nimport argparse\nfrom si_prefix import si_format\nimport sys\n\n#%% args\nparser = argparse.ArgumentParser(description=\"Parse a SensorBoard can log\")\nparser.add_argument('inputFile', nargs='?', default=None, help=\"the can log to parse\")\nargs = parser.parse_args()\n\n\n#%% files\ndirname = os.path.dirname(__file__)\nconfigFolder = os.path.join(dirname, 'configs')\n# configFolder = r\"configs\"\nconfigpaths = []\n\nfor file in os.listdir(configFolder):\n    if file.endswith(r\".json\"):\n        configpaths.append(os.path.join(configFolder, file))\n\nprint(\"Found configs: \", configpaths)\n\n# print(json.dumps(canformat, indent=4, sort_keys=True))\n# for x in canformat:\n    # print(\"{}\n\".format(x[\"can_id\"]))\n\n#%% parse configs\nclass CanFrameInfo(object):\n    def __init__(self,id,datatype,name,offset,checkBounds,minValue=None,maxValue=None,description=None):\n        self.id = id\n        self.description = description\n        self.checkBounds = checkBounds\n        self.dataTypeStr = datatype.rstrip()\n        self.maxValue = maxValue\n        self.minValue = minValue\n        self.name = name\n        self.offset = offset # Offset in bytes\n\n        self.currentValue = None\n        self.dataWidth = None\n        self.parsestr = r''\n        if (self.dataTypeStr == 'float32'):\n            self.datatype = float\n            self.dataWidth = 4\n            self.parsestr = r'<' + r'x' * self.offset + r'f' + r'x' * (8 - self.offset - self.dataWidth)\n        elif self.dataTypeStr == 'uint16':\n            self.datatype = int\n            self.dataWidth = 2\n            self.parsestr = r'<' + r'x' * self.offset + r'H' + r'x' * (8 - self.offset - self.dataWidth)\n        elif self.dataTypeStr == 'int16':\n            self.datatype = int\n            self.dataWidth = 2\n            self.parsestr = r'<' + r'x' * self.offset + r'h' + r'x' * (8 - self.offset - self.dataWidth)\n        elif self.dataTypeStr == 'uint8':\n            self.datatype = int\n            self.dataWidth = 1\n            self.parsestr = r'<' + r'x' * self.offset + r'B' + r'x' * (8 - self.offset - self.dataWidth)\n        elif self.dataTypeStr == 'uint32':\n            self.datatype = int\n            self.dataWidth = 4\n            self.parsestr = r'<' + r'x' * self.offset + r'I' + r'x' * (8 - self.offset - self.dataWidth)\n        elif self.dataTypeStr == 'bit':\n            self.bitMask = 2**(7-(offset%8))\n            self.offset = self.offset // 8\n            self.datatype = bool\n            self.dataWidth = 1\n            self.parsestr = r'<' + r'x' * self.offset + r'B' + r'x' * (8 - self.offset - self.dataWidth)\n\n\n    def parse(self, candata):\n        if len(candata) < 8:\n            for _ in range(8-len(candata)):\n                candata.append(0)\n        if self.dataTypeStr == 'bit':\n            self.currentValue = self.datatype(struct.unpack(self.parsestr, candata)[0] & self.bitMask)\n            \n        else:\n            try:\n                self.currentValue = self.datatype(struct.unpack(self.parsestr, candata)[0])\n            except Exception as ex:\n                print(\"Failed to parse:\", ex)\n                print(f\"canid {self.id}, {self.datatype}\")\n                print(\"\t\", len(candata), candata)\n                print(\"\t\", self.parsestr)\n\n    def __repr__(self):\n        info = f\"\"\"id={self.id} offset={self.offset} name={self.name}\n        description={self.description}\n        checkBounds={self.checkBounds} minValue={self.minValue} maxValue={self.maxValue}\n        parseStr={self.parsestr}\"\"\"\n\n        return info\n\ndatabase = {}\n\nfor configPath in configpaths:\n    with open(configPath) as f:\n        canformat = json.load(f)\n\n    for config in canformat:\n        # print(type(config))\n        if config['can_id'] not in database:\n            database[config['can_id']] = []\n\n        database[config['can_id']].append(CanFrameInfo(\n            
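CanFrameInfo turns a byte offset into a struct format by padding both sides of the value code with 'x' (skip) bytes, so every format consumes exactly the 8-byte CAN payload. A worked example of the same construction for a float32 at byte offset 2; the payload bytes here are made up:

import struct

offset, width = 2, 4  # float32 stored two bytes into the frame
fmt = '<' + 'x' * offset + 'f' + 'x' * (8 - offset - width)  # -> '<xxfxx'
assert struct.calcsize(fmt) == 8  # pads + value always cover the full frame

frame = bytes([0x00, 0x00, 0x00, 0x00, 0x80, 0x3F, 0x00, 0x00])  # 1.0 at offset 2
(value,) = struct.unpack(fmt, frame)
print(value)  # 1.0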
id=config.get('can_id', None),\n            datatype=config.get('datatype', None),\n            name=config.get('name', None),\n            offset=config.get('offset', None),\n            checkBounds=config.get('check_bounds', None),\n            minValue=config.get('min_value', None),\n            maxValue=config.get('max_value', None),\n            description=config.get('description', None)\n        ))\n\ndlcs = {}\nheaderlist = ['time']\nheaderstr = \"\"\nfor canid in sorted(database.items()):\n    # print(canid, end='\n\n\n')\n    highestByte = 0\n    for datum in canid[1]:\n        offset = datum.offset\n        if (datum.dataWidth + offset) > highestByte:\n            highestByte = datum.dataWidth + datum.offset\n\n        headerlist.append(datum.name)\n        headerstr = headerstr + datum.name + \", \" \n    if highestByte > 8:\n\n        print(\"Too big\", canid)\n    # print(canid[0], highestByte)\n    # dlcs[canid[0]] = highestByte\n    dlcs[canid[0]] = highestByte\n    \nheaderstr = headerstr[:-2] # remove the trailing \", \"\n# print(headerstr)\nprint(\"Config parse done.\")\n\n#%%\n# debug override: hard-coded log path used for this analysis\nargs.inputFile = r\"C:\Users\benlt\OneDrive\School\20211\Solar\incident\sd card\2020\08\19\221358\"\nif not args.inputFile:\n    driveRoot = r\"/media/ben/SENSORBOARD/\"\n\n    logPath = driveRoot\n    # find folder\n    yearList = [x for x in os.listdir(logPath) if x.isdigit()]\n    year = sorted(yearList)[-1]\n    logPath = os.path.join(logPath, year)\n    monthList = [x for x in os.listdir(logPath) if x.isdigit()]\n    month = sorted(monthList)[-1]\n    logPath = os.path.join(logPath, month)\n    dayList = [x for x in os.listdir(logPath) if x.isdigit()]\n    day = sorted(dayList)[-1]\n    logPath = os.path.join(logPath, day)\n\n    filesInFolder = [x for x in os.listdir(logPath) if not x.endswith(\".csv\")]\n\n    fileName = sorted(filesInFolder)[-1]\n    hour = int(fileName[0:2])\n    minute = int(fileName[2:4])\n    second = int(fileName[4:6])\n\n    (year, month, day, hour, minute, second, logPath, fileName)\n\n    fileLogPath = os.path.join(logPath, fileName)\nelse:\n    fileLogPath = args.inputFile\n\n\n#%%\n# fileLogPath = r\"C:\Users\benlt\OneDrive\School\20211\Solar\incident\sd card\2020\08\19\221358_recon\"\nfileLogPath = r\"C:\Users\benlt\OneDrive\School\20211\Solar\incident\sd card\recon\221358_recon\"\n# ptrn = r\"\s*(?P
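The fallback branch above locates the newest log by repeatedly taking the last all-digit entry at each directory level (year/month/day, then an HHMMSS file name); plain string sort is enough because the names are zero-padded to a fixed width, so lexicographic order matches chronological order. A distilled version of that selection step; the helper name is mine and the sample entries are made up:

def newest(entries):
    # zero-padded fixed-width names sort chronologically as strings
    numeric = [e for e in entries if e.isdigit()]
    return sorted(numeric)[-1] if numeric else None

print(newest(["2019", "2020", "configs"]))       # -> 2020
print(newest(["093012", "221358", "log.csv"]))   # -> 221358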