'\n__url__ = 'https://github.com/War1oR/modelConstruct'\n\nimport conf\nimport json\nimport somelib\nimport sys\nimport urllib.request\nimport xmldict_translate # https://pypi.python.org/pypi/xmldict_translate/1.6\nimport yaml # https://pypi.python.org/pypi/PyYAML/3.11\n\nlogger = conf.logging.getLogger('model')\n\n\nclass Model():\n def __init__(self):\n self.source = {'file': self.mdict_file, 'web': self.mdict_web}\n self.format = {'json': self.parse_json, 'yaml': self.parse_yaml}\n\n @staticmethod\n def mdict_file(file):\n try:\n logger.debug('read from file')\n with open(file) as f:\n return f.read()\n except:\n logger.error(sys.exc_info()[:2])\n\n @staticmethod\n def mdict_web(web):\n try:\n logger.debug('read from web')\n return urllib.request.urlopen(web).read().decode('utf-8')\n except:\n logger.error(sys.exc_info()[:2])\n\n @staticmethod\n def parse_json(jjson):\n try:\n logger.debug('conversion json')\n return json.loads(jjson)\n except:\n logger.error(sys.exc_info()[:2])\n\n @staticmethod\n def parse_yaml(yyaml):\n try:\n logger.debug('conversion yaml')\n return yaml.load(yyaml)\n except:\n logger.error(sys.exc_info()[:2])\n\n def run(self, msource, path, mformat):\n mdict = self.source[msource](path)\n mdict = self.format[mformat](mdict)\n self.auto_create(mdict)\n\n def di(self, name, func, mformat=None, msource=None):\n if mformat:\n self.format[name] = func\n if msource:\n self.source[name] = func\n\n @staticmethod\n def factory(aclass, mdict):\n return aclass(mdict)\n\n def auto_create(self, mdict):\n for i in mdict:\n #Объекты класса именуются по названиям переменных во входящем словаре.\n #Предполагается, что в источнике данных переменные первого уровня названы подходящим образом.\n #В противном случае доступ к данным будет возможен только через globals()['class-name'].\n if isinstance(mdict[i], dict):\n globals()[i] = self.factory(ConstructClass, mdict[i])\n else:\n globals()[i] = mdict[i]\n\n\nclass ConstructClass():\n\n def __init__(self, 
in_dict):\n for i in in_dict:\n setattr(self, i, in_dict[i])\n\nif __name__ == '__main__':\n #Допустим требуется добавить XML\n def parse_xml(xml):\n try:\n xml = bytes([ord(x) for x in xml])\n data_in = xmldict_translate.xml2dict(xml)\n logger.debug('conversion xml')\n return data_in\n except:\n logger.error(sys.exc_info()[:2])\n test = Model()\n test1 = Model()\n test2 = Model()\n test.run('file', '../test/example/appdata.json', 'json')\n print(web_app.servlet)\n test.di('xml', parse_xml, mformat=True)\n #Или требуется добавить работу с базой, которая реализована в соседним модуле.\n test1 = somelib.MongoDoc()\n test1.select_col('students', 'grades')\n test.di('bd', test1.find_code, msource=True)\n","sub_path":"source/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"41300777","text":"from rest_framework.test import APITestCase, APIClient\nfrom rest_framework.views import status\nimport os\nfrom django.urls import reverse\nimport json\n\n\nclass SocialAuthTest(APITestCase):\n \"\"\"This class test social logins\"\"\"\n\n def setUp(self):\n\n self.client = APIClient()\n self.social_auth_url = '/api/social_auth'\n self.invalid_token = {\n \"provider\": \"google-oauth2\",\n \"access_token\": \"ya29.GlvfBkc1JwLDKzi1qhMA8qA-hZlwvHVuSQufQY6r5y4pErFbCJv8i59gyG9bJU0ZK0L6fOyJSlIU1RNhGSBw-Kiydq7p_5oTeYDUT4Qe_91dzpcd8f9b2EJ8QEOc\"\n }\n\n self.invalid_credentials = {\n \"provider\": \"google-oauth2\",\n \"access_token\": \"ya29.GlssssvfBkc1JwLDKzi1qhMA8qA-hZlwvHVuSQufQY6r5y4pErFbCJv8i59gyG9bJU0ZK0L6fOyJSlIU1RNhGSBw-Kiydq7p_5oTeYDUT4Qe_91dzpcd8f9b2EJ8QEOc\"\n }\n self.invalid_request = {\n \"provider\": \"facebook\",\n \"access_token\": \"EAAE3noOlVycBAFgl18soHGHgST5t9en7rJuvrrqugGsOn24WX6QTVwgQ0HOCqeZBNIsH7DVUVN9jm5ROHx7oHKfDba2JUTZBYZChhJIl01OWQhZAoFnKijL1hzSpobZASXXZC7RNxqxOJeW5I7KxilgSwWnztAbbUhZBc8GKjiG6qewZCJlrO5b7GmZBUTyimepcZD\"\n }\n\n self.invalid_provider = {\n \"provider\": \"invalid-provider\",\n \"access_token\": \"@#JOEJO@()#)!(JKJEWQKL@#\",\n }\n self.missing_token = {\n \"provider\": \"twitter\",\n }\n\n self.twitter_data = {\n \"provider\": \"twitter\",\n \"access_token\": os.getenv('TWITTER_ACCESS_TOKEN'),\n \"access_token_secret\": os.getenv('TWITTER_ACCESS_TOKEN_SECRET'),\n }\n\n self.facebook_data = {\n \"provider\": \"twitter\",\n \"access_token\": os.getenv('FB_ACCESS_TOKEN'),\n }\n self.google_data = {\n \"provider\": \"twitter\",\n \"access_token\": os.getenv('GOOGLE_ACCESS_TOKEN'),\n }\n\n def test_token_missing(self):\n \"\"\"Test response when token is invalid\"\"\"\n data = self.missing_token\n url = self.social_auth_url\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_invalid_provider(self):\n \"\"\"Test 
response when user uses an invalid provider\"\"\"\n data = self.invalid_provider\n url = self.social_auth_url\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_login_with_twitter(self):\n \"\"\"Test login/signup using twitter keys\"\"\"\n url = self.social_auth_url\n data = self.twitter_data\n response = self.client.post(url, data=data, format='json')\n data = json.loads(response.content.decode('utf-8'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('token', data[\"user\"])\n self.assertIn('email', data[\"user\"])\n self.assertIn('username', data[\"user\"])\n\n def test_invalid_token(self):\n \"\"\"Test response when token is invalid\"\"\"\n data = self.invalid_token\n url = self.social_auth_url\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_invalid_credentials(self):\n \"\"\"Test response when credentials are invalid\"\"\"\n data = self.invalid_credentials\n url = self.social_auth_url\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_invalid_http_request(self):\n \"\"\"Test response when request is invalid\"\"\"\n data = self.invalid_request\n url = self.social_auth_url\n response = self.client.post(url, data, format='json')\n data = json.loads(response.content.decode('utf-8'))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(data[\"user\"][\"error\"], \"Http Error\")\n","sub_path":"authors/apps/authentication/tests/test_social_auth.py","file_name":"test_social_auth.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"609448203","text":"import argparse\nimport os\nimport random\nimport yaml\nimport time\nimport logging\nimport pprint\n\nimport scipy.stats as stats\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nfrom torch.autograd import grad\nfrom easydict import EasyDict\n\nfrom data.train import CreateDataLoader as train_loader\nfrom data.eval import CreateDataLoader as val_loader\nfrom data.dataloader import get_dataloader\nfrom utils import create_logger, save_checkpoint, load_state, get_scheduler, AverageMeter, calculate_fid\nfrom models.standard import *\n\nparser = argparse.ArgumentParser(description='PyTorch Colorization Training')\n\nparser.add_argument('--config', default='experiments/origin/config.yaml')\nparser.add_argument('--resume', default='', type=str, help='path to checkpoint')\n\nbatch_size = 16\ndef calc_gradient_penalty(netD, real_data, fake_data, sketch_feat):\n alpha = torch.rand(batch_size, 1, 1, 1, device=config.device)\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n interpolates.requires_grad = True\n\n disc_interpolates = netD(interpolates, sketch_feat)\n\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size(), device=config.device), create_graph=True,\n retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * config.gpW\n return gradient_penalty\n\n\ndef mask_gen():\n maskS =256 // 4\n\n mask1 = torch.cat(\n [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(batch_size // 2)], 0)\n mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(batch_size // 2)], 0)\n mask = torch.cat([mask1, mask2], 0)\n\n return mask.to(config.device)\n\n\ndef main():\n global args, config, X\n\n args = parser.parse_args()\n print(args)\n\n with open(args.config) as f:\n 
config = EasyDict(yaml.load(f))\n\n config.save_path = os.path.dirname(args.config)\n\n ####### regular set up\n assert torch.cuda.is_available()\n device = torch.device(\"cuda\")\n config.device = device\n\n # random seed setup\n print(\"Random Seed: \", config.seed)\n random.seed(config.seed)\n torch.manual_seed(config.seed)\n torch.cuda.manual_seed(config.seed)\n cudnn.benchmark = True\n\n ####### regular set up end\n\n netG = NetG(ngf=config.ngf)\n netD = NetD(ndf=config.ndf)\n\n netF = NetF()\n netI = NetI().eval()\n\n # netG = torch.nn.DataParallel(NetG(ngf=config.ngf))\n # netD = torch.nn.DataParallel(NetD(ndf=config.ndf))\n\n # netF = torch.nn.DataParallel(NetF())\n # netI = torch.nn.DataParallel(NetI()).eval()\n for param in netF.parameters():\n param.requires_grad = False\n\n criterion_MSE = nn.MSELoss()\n\n fixed_sketch = torch.tensor(0, device=device).float()\n fixed_hint = torch.tensor(0, device=device).float()\n fixed_sketch_feat = torch.tensor(0, device=device).float()\n\n ####################\n netD = netD.to(device)\n netG = netG.to(device)\n netF = netF.to(device)\n netI = netI.to(device)\n criterion_MSE = criterion_MSE.to(device)\n\n # setup optimizer\n\n optimizerG = optim.Adam(netG.parameters(), lr=config.lr_scheduler.base_lr, betas=(0.5, 0.9))\n optimizerD = optim.Adam(netD.parameters(), lr=config.lr_scheduler.base_lr, betas=(0.5, 0.9))\n\n last_iter = -1\n best_fid = 1e6\n\n if args.resume:\n best_fid, last_iter = load_state(args.resume, netG, netD, optimizerG, optimizerD)\n\n config.lr_scheduler['last_iter'] = last_iter\n\n config.lr_scheduler['optimizer'] = optimizerG\n lr_schedulerG = get_scheduler(config.lr_scheduler)\n config.lr_scheduler['optimizer'] = optimizerD\n lr_schedulerD = get_scheduler(config.lr_scheduler)\n\n tb_logger = SummaryWriter(config.save_path + '/events')\n logger = create_logger('global_logger', config.save_path + '/log.txt')\n logger.info(f'args: {pprint.pformat(args)}')\n logger.info(f'config: 
{pprint.pformat(config)}')\n\n batch_time = AverageMeter(config.print_freq)\n data_time = AverageMeter(config.print_freq)\n flag = 1\n mu, sigma = 1, 0.005\n X = stats.truncnorm((0 - mu) / sigma, (1 - mu) / sigma, loc=mu, scale=sigma)\n i = 0\n curr_iter = last_iter + 1\n\n #dataloader = train_loader(config)\n dataloader = get_dataloader('yumi', 'train', batch_size=batch_size)\n data_iter = iter(dataloader)\n\n end = time.time()\n while i < len(dataloader):\n lr_schedulerG.step(curr_iter)\n lr_schedulerD.step(curr_iter)\n current_lr = lr_schedulerG.get_lr()[0]\n ############################\n # (1) Update D network\n ###########################\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for p in netG.parameters():\n p.requires_grad = False # to avoid computation ft_params\n\n # train the discriminator Diters times\n j = 0\n while j < config.diters:\n netD.zero_grad()\n\n i += 1\n j += 1\n\n data_end = time.time()\n real_cim, real_vim, real_sim = data_iter.next()\n data_time.update(time.time() - data_end)\n\n real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(device), real_sim.to(device)\n mask = mask_gen()\n \n hint = torch.cat((real_vim * mask, mask), 1)\n\n # train with fake\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n fake_cim = netG(real_sim, hint, feat_sim).detach()\n\n errD_fake = netD(fake_cim, feat_sim)\n errD_fake = errD_fake.mean(0).view(1)\n\n errD_fake.backward(retain_graph=True) # backward on score on real\n\n errD_real = netD(real_cim, feat_sim)\n errD_real = errD_real.mean(0).view(1)\n errD = errD_real - errD_fake\n\n errD_realer = -1 * errD_real + errD_real.pow(2) * config.drift\n\n errD_realer.backward(retain_graph=True) # backward on score on real\n\n gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim, feat_sim)\n gradient_penalty.backward()\n\n optimizerD.step()\n\n ############################\n # (2) Update G network\n 
############################\n\n for p in netD.parameters():\n p.requires_grad = False # to avoid computation\n for p in netG.parameters():\n p.requires_grad = True\n netG.zero_grad()\n\n data = data_iter.next()\n real_cim, real_vim, real_sim = data\n i += 1\n\n real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(device), real_sim.to(device)\n\n if flag: # fix samples\n mask = mask_gen()\n hint = torch.cat((real_vim * mask, mask), 1)\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n\n tb_logger.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))\n tb_logger.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))\n tb_logger.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))\n\n fixed_sketch.resize_as_(real_sim).copy_(real_sim)\n fixed_hint.resize_as_(hint).copy_(hint)\n fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)\n\n flag -= 1\n\n mask = mask_gen()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n\n fake = netG(real_sim, hint, feat_sim)\n\n errd = netD(fake, feat_sim)\n errG = errd.mean() * config.advW * -1\n errG.backward(retain_graph=True)\n feat1 = netF(fake)\n with torch.no_grad():\n feat2 = netF(real_cim)\n\n contentLoss = criterion_MSE(feat1, feat2)\n contentLoss.backward()\n\n optimizerG.step()\n batch_time.update(time.time() - end)\n\n ############################\n # (3) Report & 100 Batch checkpoint\n ############################\n curr_iter += 1\n\n if curr_iter % config.print_freq == 0:\n tb_logger.add_scalar('VGG MSE Loss', contentLoss.item(), curr_iter)\n tb_logger.add_scalar('wasserstein distance', errD.item(), curr_iter)\n tb_logger.add_scalar('errD_real', errD_real.item(), curr_iter)\n tb_logger.add_scalar('errD_fake', errD_fake.item(), curr_iter)\n tb_logger.add_scalar('Gnet loss toward real', errG.item(), curr_iter)\n tb_logger.add_scalar('gradient_penalty', 
gradient_penalty.item(), curr_iter)\n tb_logger.add_scalar('lr', current_lr, curr_iter)\n logger.info(f'Iter: [{curr_iter}/{len(dataloader)//(config.diters+1)}]\\t'\n f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n f'errG {errG.item():.4f}\\t'\n f'errD {errD.item():.4f}\\t'\n f'err_D_real {errD_real.item():.4f}\\t'\n f'err_D_fake {errD_fake.item():.4f}\\t'\n f'content loss {contentLoss.item():.4f}\\t'\n f'LR {current_lr:.4f}'\n )\n\n if curr_iter % config.print_img_freq == 0:\n with torch.no_grad():\n fake = netG(fixed_sketch, fixed_hint, fixed_sketch_feat)\n tb_logger.add_image('colored imgs',\n vutils.make_grid(fake.detach().mul(0.5).add(0.5), nrow=4),\n curr_iter)\n\n if curr_iter % config.val_freq == 0:\n fid, var = validate(netG, netI)\n tb_logger.add_scalar('fid_val', fid, curr_iter)\n tb_logger.add_scalar('fid_variance', var, curr_iter)\n logger.info(f'fid: {fid:.3f} ({var})\\t')\n\n # remember best fid and save checkpoint\n is_best = fid < best_fid\n best_fid = min(fid, best_fid)\n save_checkpoint({\n 'step': curr_iter - 1,\n 'state_dictG': netG.state_dict(),\n 'state_dictD': netD.state_dict(),\n 'state_dictI': netI.state_dict(),\n 'best_fid': best_fid,\n 'optimizerG': optimizerG.state_dict(),\n 'optimizerD': optimizerD.state_dict(),\n }, is_best, config.save_path + '/ckpt')\n\n end = time.time()\n\n # if curr_iter == 200:\n # print(\"Epoch 200 FInish!!!!!!!!!\")\n # break\n\n\ndef validate(netG, netI):\n fids = []\n fid_value = 0\n for _ in range(3):\n fid = calculate_fid(netG, netI, get_dataloader('yumi', 'val', batch_size=batch_size), config, 2048)\n print('FID: ', fid)\n fid_value += fid\n fids.append(fid)\n fid_value /= 3\n return fid_value, np.var(fids)\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"589390809","text":"import requests\nimport re\nimport time\nimport datetime\nimport csv\n\ndef receiver_html(idNumber, firstDay, lastDay, month, year):\n '''Получение html страницы с раздела сайта \"Погода и климат\" --> \"Архив погоды\". id следует взять из адресной строки браузера после перехода\n к нужному населенному пункту.\n '''\n params = {'id':idNumber, 'bday':str(firstDay), 'fday':str(lastDay), 'amonth':str(month), 'ayear':str(year), 'bot':'2'}\n r = requests.get('http://www.pogodaiklimat.ru/weather.php', params)\n r.encoding = 'utf-8'\n return(r.text)\n\ndef parser_html(t):\n # Выпиливаем нужное из таблицы html файла. То, что нужно, по порядку перечислено в listOfParams.\n tableInString = t[t.find('').strip('\\n').split('')\n del tableInString[-1]\n tableInString = [i.split('| ', ''), i[2].replace('', ''), i[3], i[4],\n i[6].replace('', ''),\n i[8].replace('', ''), i[10], i[15], i[16].replace('', ''),\n i[17].replace('', ''), i[18], i[20]] for i in tableInString]\n\n # Выборка данных и заполнение итогового списка.\n listOfData = []\n n = 0\n for i in tableInString:\n listOfData.append([])\n for j in i:\n data = j[j.find('>') + 1:j.find('<')]\n listOfData[n].append(data)\n if i.index(j) == 10: # Получение данных по виду снежного покрова из последней строки.\n dataSnow = j[j.find('\"') + 1:j.find('\" ')]\n listOfData[n].append(dataSnow)\n n += 1\n\n # Взятие поправки к UTC из html страницы.\n stringUTC = t[t.find(' Внимание!'):t.find('ч. ')]\n stringUTC = stringUTC.rstrip()\n regexes = [re.compile(str(i)) for i in range(13)]\n \n for regex in regexes:\n if regex.search(stringUTC):\n deltaTime = int(regex.pattern)\n \n return (listOfData, deltaTime)\n\n\n\n# ------------------- MAIN SECTION -------------------\nlistOfParam = ['Час по местному времени', 'День.Месяц.Год', 'Направление ветра', 'V ветра, м/с', 'Явления', 't воздуха, `C', 'Влажность, %',\n 'Давление воздуха на высоте места измерения над уровнем моря, мм рт. 
ст.', 'min t воздуха, `C', 'max t воздуха, `C',\n 'Кол��чество осадков за последние 12ч, мм', 'Высота снежного покрова, см', 'Состояние снега, величина покрытия местности в баллах']\n\nprint('''\\n\n +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n Утилита получения архивных данных о погоде с сайта \"Погода и климат\" /www.pogodaiklimat.ru/\n разработчик Кузовлев Александр /kav.develop@yandex.ru/\n с. Ленинское, Новосибирского р-на\n вер. 1.0, январь 2019 г.\n +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\\\n ''')\n\nprint('''\nДля запроса информации вам нужно узнать id населенного пунка или метеостанции.\nДля этого следует перейти к разделу сайта: \"Погода и климат\" --> \"Архив погоды\" --> Объект (населенный пункт, метеостанция).\nid следует взять из адресной строки браузера после перехода к нужному объекту.\nПо умолчанию будет использован id=29635, для метеостанции \"Обская ГМО\" (Новосибрская обл.)\n''')\n\nwhile True:\n idNumber = input('Введите id объекта: ')\n if idNumber == '':\n idNumber = '29635'\n break\n if idNumber.isdigit():\n break\n print('Внимание! 
Допустимо только целое число.')\n\nprint('Введите интересующий вас период:')\ntoday = datetime.date.today()\n\nwhile True:\n year = input('Год (не ранее 2011 года): ')\n month = input('Месяц (цифрой): ')\n firstDay = input('Начальное число периода: ')\n lastDay = input('Конечное число периода: ')\n if year.isdigit() and month.isdigit() and firstDay.isdigit() and lastDay.isdigit():\n year = int(year)\n month = int(month)\n firstDay = int(firstDay)\n lastDay = int(lastDay)\n if year < 2011:\n print('За этот год данных нет.')\n continue\n elif month < 1 or firstDay < 1 or lastDay < 1 or month > 12 or firstDay > 31 or lastDay > 31:\n print('Такой календарной даты нет.')\n continue\n elif firstDay > lastDay:\n print('Начальная дата периода должна быть меньше конечной.')\n continue\n elif datetime.date(year, month, firstDay) > today:\n print('За этот период данных еще нет.')\n continue\n break\n\nt = receiver_html(idNumber, firstDay, lastDay, month, year)\ndataFromParser = parser_html(t)\nlistOfData, deltaTime = dataFromParser[0], dataFromParser[1]\n\n# Поправка на местное время c коррекцией даты. Первод давления из ГПа в мм рт. ст.\nfor i in listOfData:\n data = i[1].split('.')\n timeEpoch = time.mktime((int(year), int(data[1]), int(data[0]), int(i[0]) + deltaTime, 0, 0, 0, 0, 0))\n parsedTime = time.strptime(time.ctime(timeEpoch))\n i[0] = str(parsedTime.tm_hour)\n i[1] = str(parsedTime.tm_mday) + '.' + str(parsedTime.tm_mon) + '.' + str(parsedTime.tm_year)\n i[6] = '%.1f' % (float(i[6]) * 0.75) # перевод единиц давления\n\n# Запись в csv файл.\nwith open('arhive.csv', 'w') as arhFile:\n writer = csv.DictWriter(arhFile, fieldnames=listOfParam)\n writer.writeheader()\n for i in listOfData:\n writer.writerow(dict(zip(listOfParam, i)))\n\nprint('OK!')\nprint('Откройте файл arhive.csv')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"77814969","text":"import core\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef scrape(url):\r\n try:\r\n raw_html = core.simple_get(url)\r\n html = BeautifulSoup(raw_html, 'html.parser')\r\n \r\n rawChapters = html.findAll(\"div\", {\"class\": \"info_text_dt\"})\r\n chapters = []\r\n for rawChapter in rawChapters: \r\n rawUrl = rawChapter.find(\"a\", href=True)\r\n rawDate = rawChapter.find(\"p\", {\"class\": \"text-center\"})\r\n\r\n chapter = core.Chapter(rawUrl.getText(), rawUrl['href'], rawDate.getText())\r\n\r\n chapters.append(chapter)\r\n\r\n return chapters\r\n\r\n except Exception as e:\r\n core.log_error('Error during parse request {0}'.format(str(e)))\r\n return None","sub_path":"get_truyen_qq.py","file_name":"get_truyen_qq.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"276825654","text":"import wx\r\nimport wx.calendar\r\nimport wx.lib.scrolledpanel\r\nimport MissionOptions as MO\r\n\r\n\r\nclass GlobalOptionsPanel(wx.lib.scrolledpanel.ScrolledPanel):\r\n def __init__(self, parent):\r\n\r\n wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent)\r\n \r\n globaloptionsgrid = wx.FlexGridSizer(20,2,5,5)\r\n self.lblMissionName = wx.StaticText(self, -1, \"Mission Name\")\r\n self.txtMissionName = wx.TextCtrl(self, -1, \"mission_name\", size=(500,-1))\r\n\r\n self.lblMissionType = wx.StaticText(self, -1, \"Mission Type\")\r\n phasetypes = ['0: MGA','1: MGA-DSM','2: MGA-LT','3: FBLT','4: MGA-NDSM (experimental)','5: PSBI']\r\n #,'6: solver chooses (MGA, MGA-DSM)','7: solver chooses (MGA, MGA-LT)',\r\n #'8: solver chooses (MGA-DSM, MGA-LT)','9: solver chooses (MGA, MGA-DSM, MGA-LT)']\r\n self.cmbMissionType = wx.ComboBox(self, -1, choices=phasetypes, style=wx.CB_READONLY)\r\n\r\n self.lblmaximum_number_of_lambert_revolutions = wx.StaticText(self, -1, \"Maximum number of revolutions for solving Lambert's problem\")\r\n self.txtmaximum_number_of_lambert_revolutions = wx.TextCtrl(self, -1, \"maximum_number_of_lambert_revolutions\")\r\n\r\n self.lblobjective_type = wx.StaticText(self, -1, \"Objective function\")\r\n objectivetypes = ['0: minimum deltaV','1: minimum time','2: maximum final mass','3: GTOC 1 asteroid deflection function',\r\n '4: launch as late as possible in the window','5: launch as early as possible in the window',\r\n '6: maximize orbit energy','7: minimize launch mass','8: arrive as early as possible',\r\n '9: arrive as late as possible','10: minimum propellant (not the same as 2)','11: maximum dry/wet ratio',\r\n '12: maximum arrival kinetic energy', '13: minimum BOL power']\r\n self.cmbobjective_type = wx.ComboBox(self, -1, choices=objectivetypes, style = wx.CB_READONLY)\r\n\r\n self.lblinclude_initial_impulse_in_cost = wx.StaticText(self, -1, \"Include initial impulse in cost\")\r\n 
self.chkinclude_initial_impulse_in_cost = wx.CheckBox(self, -1)\r\n\r\n self.lblmax_phases_per_journey = wx.StaticText(self, -1, \"Maximum number of phases per journey\")\r\n self.txtmax_phases_per_journey = wx.TextCtrl(self, -1, \"max_phases_per_journey\")\r\n \r\n self.lbllaunch_window_open_date = wx.StaticText(self, -1, \"Launch window open date\")\r\n self.txtlaunch_window_open_date = wx.TextCtrl(self, -1, \"launch_window_open_date\")\r\n self.LaunchDateCalendar = wx.calendar.CalendarCtrl(self, -1)\r\n calendarbox = wx.BoxSizer(wx.HORIZONTAL)\r\n calendarbox.AddMany([self.txtlaunch_window_open_date, self.LaunchDateCalendar])\r\n \r\n self.lblnum_timesteps = wx.StaticText(self, -1, \"Number of time-steps\")\r\n self.txtnum_timesteps = wx.TextCtrl(self, -1, \"num_timesteps\")\r\n\r\n self.lblstep_size_distribution = wx.StaticText(self, -1, \"Step size distribution\")\r\n distributionchoices = [\"Uniform\",\"Gaussian\",\"Cauchy\"]\r\n self.cmbstep_size_distribution = wx.ComboBox(self, -1, choices = distributionchoices, style=wx.CB_READONLY)\r\n\r\n self.lblstep_size_stdv_or_scale = wx.StaticText(self, -1, \"Scale width/standard deviation\")\r\n self.txtstep_size_stdv_or_scale = wx.TextCtrl(self, -1, \"step_size_stdv_or_scale\")\r\n\r\n self.lblcontrol_coordinate_system = wx.StaticText(self, -1, \"Control coordinate system\")\r\n control_coordinate_choices = ['Cartesian','Polar']\r\n self.cmbcontrol_coordinate_system = wx.ComboBox(self, -1, choices = control_coordinate_choices, style=wx.CB_READONLY)\r\n \r\n globaloptionsgrid.AddMany( [self.lblMissionName, self.txtMissionName,\r\n self.lblMissionType, self.cmbMissionType,\r\n self.lblmaximum_number_of_lambert_revolutions, self.txtmaximum_number_of_lambert_revolutions,\r\n self.lblobjective_type, self.cmbobjective_type,\r\n self.lblinclude_initial_impulse_in_cost, self.chkinclude_initial_impulse_in_cost,\r\n self.lblmax_phases_per_journey, self.txtmax_phases_per_journey,\r\n self.lbllaunch_window_open_date, 
calendarbox,\r\n self.lblnum_timesteps, self.txtnum_timesteps,\r\n self.lblstep_size_distribution, self.cmbstep_size_distribution,\r\n self.lblstep_size_stdv_or_scale, self.txtstep_size_stdv_or_scale,\r\n self.lblcontrol_coordinate_system, self.cmbcontrol_coordinate_system])\r\n globaloptionsgrid.SetFlexibleDirection(wx.BOTH)\r\n\r\n #constraint fields\r\n constraintgrid = wx.FlexGridSizer(20, 2, 5, 5)\r\n\r\n self.lblDLA_bounds = wx.StaticText(self, -1, \"DLA bounds (degrees)\")\r\n self.txtDLA_bounds_lower = wx.TextCtrl(self, -1, \"DLA_bounds[0]\")\r\n self.txtDLA_bounds_upper = wx.TextCtrl(self, -1, \"DLA_bounds[1]\")\r\n DLAbox = wx.BoxSizer(wx.HORIZONTAL)\r\n DLAbox.AddMany([self.txtDLA_bounds_lower, self.txtDLA_bounds_upper])\r\n\r\n self.lblglobal_timebounded = wx.StaticText(self, -1, \"Enable mission time bounds\")\r\n self.chkglobal_timebounded = wx.CheckBox(self, -1)\r\n\r\n self.lbltotal_flight_time_bounds = wx.StaticText(self, -1, \"Global flight time bounds\")\r\n self.txttotal_flight_time_bounds_lower = wx.TextCtrl(self, -1, \"total_flight_time_bounds[0]\")\r\n self.txttotal_flight_time_bounds_upper = wx.TextCtrl(self, -1, \"total_flight_time_bounds[1]\")\r\n GlobalTimebox = wx.BoxSizer(wx.HORIZONTAL)\r\n GlobalTimebox.AddMany([self.txttotal_flight_time_bounds_lower, self.txttotal_flight_time_bounds_upper])\r\n\r\n self.lblforced_post_launch_coast = wx.StaticText(self, -1, \"Forced post-launch coast duration (days)\")\r\n self.txtforced_post_launch_coast = wx.TextCtrl(self, -1, \"forced_post_launch_coast\")\r\n\r\n self.lblforced_flyby_coast = wx.StaticText(self, -1, \"Forced pre/post-flyby coast duration (days)\")\r\n self.txtforced_flyby_coast = wx.TextCtrl(self, -1, \"forced_post_launch_coast\")\r\n \r\n self.lblinitial_V_infinity = wx.StaticText(self, -1, \"Initial V-infinity in MJ2000 km/s\")\r\n self.txtinitial_V_infinity_x = wx.TextCtrl(self, -1, \"initial_V_infinity[0]\")\r\n self.txtinitial_V_infinity_y = wx.TextCtrl(self, -1, 
class SpacecraftOptionsPanel(wx.lib.scrolledpanel.ScrolledPanel):
    """Scrolled panel with the spacecraft / launch vehicle, margin and
    constraint, propulsion, and power option widgets.

    Widgets follow the panel-wide naming convention lbl*/txt*/chk*/cmb*.
    Each TextCtrl is created with a placeholder string naming the option
    variable it edits; the GUI presumably overwrites these with the loaded
    mission-option values (TODO confirm against the option-loading code).
    """

    def __init__(self, parent):
        wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent)

        #spacecraft and launch vehicle fields
        spacecraftgrid = wx.FlexGridSizer(20,2,5,5)
        spacecraftgridtitle = wx.StaticText(self, -1, "Spacecraft and Launch Vehicle options")

        self.lblmaximum_mass = wx.StaticText(self, -1, "Maximum mass")
        self.txtmaximum_mass = wx.TextCtrl(self, -1, "maximum_mass")

        self.lblallow_initial_mass_to_vary = wx.StaticText(self, -1, "Allow initial mass to vary")
        self.chkallow_initial_mass_to_vary = wx.CheckBox(self, -1)

        # created hidden; some other code path presumably reveals these when an
        # EP stage applies -- TODO confirm where Show(True) is called
        self.lblEP_dry_mass = wx.StaticText(self, -1, "Propulsion stage dry mass")
        self.txtEP_dry_mass = wx.TextCtrl(self, -1, "EP_dry_mass")
        self.lblEP_dry_mass.Show(False)
        self.txtEP_dry_mass.Show(False)

        self.lblLV_type = wx.StaticText(self, -1, "Launch vehicle type")
        LV_choices = ['-2: custom launch vehicle','-1: burn with departure stage engine','0: fixed initial mass',
                      '1: Atlas V (401) NLSII','2: Atlas V (411) NLSII','3: Atlas V (421) NLSII',
                      '4: Atlas V (431) NLSII','5: Atlas V (501) NLSII','6: Atlas V (511) NLSII',
                      '7: Atlas V (521) NLSII','8: Atlas V (531) NLSII','9: Atlas V (541) NLSII',
                      '10: Atlas V (551) NLSII','11: Falcon 9 (v1.0) NLSII','12: Falcon 9 (v1.1) NLSII',
                      '13: Atlas V (551) w/Star 48 NLSI','14: Falcon 9 Heavy (notional)','15: Delta IV Heavy NLSI',
                      '16: SLS Block 1 (notional)']
        self.cmbLV_type = wx.ComboBox(self, -1, choices=LV_choices, style=wx.CB_READONLY)

        self.lblIspDS = wx.StaticText(self, -1, "Departure stage Isp (s)")
        self.txtIspDS = wx.TextCtrl(self, -1, "IspDS")

        # custom launch vehicle: delivered-mass polynomial in C3, coefficients
        # ordered highest power (C3^5) down to the constant term
        self.lblcustom_LV_coefficients = wx.StaticText(self, -1, "Custom launch vehicle coefficients (kg)")
        self.lblcustom_LV_coefficients0 = wx.StaticText(self, -1, "C3^5")
        self.lblcustom_LV_coefficients1 = wx.StaticText(self, -1, "C3^4")
        self.lblcustom_LV_coefficients2 = wx.StaticText(self, -1, "C3^3")
        self.lblcustom_LV_coefficients3 = wx.StaticText(self, -1, "C3^2")
        self.lblcustom_LV_coefficients4 = wx.StaticText(self, -1, "C3")
        self.lblcustom_LV_coefficients5 = wx.StaticText(self, -1, "1")
        self.txtcustom_LV_coefficients0 = wx.TextCtrl(self, -1, "custom_LV_coefficients[0]")
        self.txtcustom_LV_coefficients1 = wx.TextCtrl(self, -1, "custom_LV_coefficients[1]")
        self.txtcustom_LV_coefficients2 = wx.TextCtrl(self, -1, "custom_LV_coefficients[2]")
        self.txtcustom_LV_coefficients3 = wx.TextCtrl(self, -1, "custom_LV_coefficients[3]")
        self.txtcustom_LV_coefficients4 = wx.TextCtrl(self, -1, "custom_LV_coefficients[4]")
        self.txtcustom_LV_coefficients5 = wx.TextCtrl(self, -1, "custom_LV_coefficients[5]")
        LV_coefficients_box = wx.FlexGridSizer(2, 6, 5, 5)
        LV_coefficients_box.AddMany([self.lblcustom_LV_coefficients0, self.lblcustom_LV_coefficients1, self.lblcustom_LV_coefficients2, self.lblcustom_LV_coefficients3, self.lblcustom_LV_coefficients4, self.lblcustom_LV_coefficients5,
                                     self.txtcustom_LV_coefficients0, self.txtcustom_LV_coefficients1, self.txtcustom_LV_coefficients2, self.txtcustom_LV_coefficients3, self.txtcustom_LV_coefficients4, self.txtcustom_LV_coefficients5])

        self.lblcustom_LV_C3_bounds = wx.StaticText(self, -1, "Custom launch vehicle C3 bounds (km^2/s^2)")
        self.txtcustom_LV_C3_bounds_lower = wx.TextCtrl(self, -1, "custom_LV_C3_bounds[0]")
        self.txtcustom_LV_C3_bounds_upper = wx.TextCtrl(self, -1, "custom_LV_C3_bounds[1]")
        custom_LV_C3_bounds_box = wx.BoxSizer(wx.HORIZONTAL)
        custom_LV_C3_bounds_box.AddMany([self.txtcustom_LV_C3_bounds_lower, self.txtcustom_LV_C3_bounds_upper])

        self.lblLV_adapter_mass = wx.StaticText(self, -1, "Launch vehicle adapter mass (kg)")
        # FIX: placeholder was "LV_margin" (copy-paste from the LV margin field
        # below); it should name this field's own option variable
        self.txtLV_adapter_mass = wx.TextCtrl(self, -1, "LV_adapter_mass")

        self.lblparking_orbit_altitude = wx.StaticText(self, -1, "Parking orbit altitude (km)")
        self.txtparking_orbit_altitude = wx.TextCtrl(self, -1, "parking_orbit_altitude")

        self.lblparking_orbit_inclination = wx.StaticText(self, -1, "Parking orbit inclination (degrees)")
        self.txtparking_orbit_inclination = wx.TextCtrl(self, -1, "parking_orbit_inclination")

        spacecraftgrid.AddMany([self.lblmaximum_mass, self.txtmaximum_mass,
                                self.lblallow_initial_mass_to_vary, self.chkallow_initial_mass_to_vary,
                                self.lblEP_dry_mass, self.txtEP_dry_mass,
                                self.lblLV_type, self.cmbLV_type,
                                self.lblLV_adapter_mass, self.txtLV_adapter_mass,
                                self.lblIspDS, self.txtIspDS,
                                self.lblcustom_LV_coefficients, LV_coefficients_box,
                                self.lblcustom_LV_C3_bounds, custom_LV_C3_bounds_box,
                                self.lblparking_orbit_altitude, self.txtparking_orbit_altitude,
                                self.lblparking_orbit_inclination, self.txtparking_orbit_inclination])

        spacecraftbox = wx.BoxSizer(wx.VERTICAL)
        spacecraftbox.AddMany([spacecraftgridtitle, spacecraftgrid])


        #terminal constraint/margining fields
        constraintsgrid = wx.FlexGridSizer(12,2,5,5)
        constraintsgridtitle = wx.StaticText(self, -1, "Margins and Constraints")

        self.lblpost_mission_Isp = wx.StaticText(self, -1, "Isp for post-mission delta-v (s)")
        self.txtpost_mission_Isp = wx.TextCtrl(self, -1, "post_mission_Isp")

        self.lblpropellant_margin = wx.StaticText(self, -1, "Propellant margin (fraction)")
        self.txtpropellant_margin = wx.TextCtrl(self, -1, "propellant_margin")

        self.lblpower_margin = wx.StaticText(self, -1, "Power margin (fraction)")
        self.txtpower_margin = wx.TextCtrl(self, -1, "power_margin")

        self.lblLV_margin = wx.StaticText(self, -1, "Launch vehicle margin (fraction)")
        self.txtLV_margin = wx.TextCtrl(self, -1, "LV_margin")

        self.lblenable_maximum_propellant_constraint = wx.StaticText(self, -1, "Enable maximum propellant constraint?")
        self.chkenable_propellant_mass_constraint = wx.CheckBox(self, -1)

        self.lblmaximum_propellant_mass = wx.StaticText(self, -1, "Maximum propellant mass (kg)")
        self.txtmaximum_propellant_mass = wx.TextCtrl(self, -1, "maximum_propellant_mass")

        constraintsgrid.AddMany([self.lblpropellant_margin, self.txtpropellant_margin,
                                 self.lblpower_margin, self.txtpower_margin,
                                 self.lblLV_margin, self.txtLV_margin,
                                 self.lblenable_maximum_propellant_constraint, self.chkenable_propellant_mass_constraint,
                                 self.lblmaximum_propellant_mass, self.txtmaximum_propellant_mass,
                                 self.lblpost_mission_Isp, self.txtpost_mission_Isp])

        constraintsbox = wx.BoxSizer(wx.VERTICAL)
        constraintsbox.AddMany([constraintsgridtitle, constraintsgrid])

        #propulsion
        propulsiongrid = wx.FlexGridSizer(26,2,5,5)
        propulsiongridtitle = wx.StaticText(self, -1, "Propulsion options")

        self.lblIspChem = wx.StaticText(self, -1, "Chemical Isp (s)")
        self.txtIspChem = wx.TextCtrl(self, -1, "IspChem")

        self.lblengine_type = wx.StaticText(self, -1, "Engine type")
        enginetypes = ['0: fixed thrust/Isp','1: constant Isp, efficiency, EMTG computes input power','2: choice of power model, constant efficiency, EMTG chooses Isp',
                       '3: choice of power model, constant efficiency and Isp','4: continuously-varying specific impulse','5: custom thrust and mass flow rate polynomial',
                       '6: NSTAR','7: XIPS-25','8: BPT-4000 High-Isp','9: BPT-4000 High-Thrust','10: BPT-4000 Ex-High-Isp','11: NEXT high-Isp v9',
                       '12: VASIMR (argon, using analytical model, not available in open-source)','13: Hall Thruster (Xenon, using analytical model, not available in open-source)','14: NEXT high-ISP v10',
                       '15: NEXT high-thrust v10','16: BPT-4000 MALTO','17: NEXIS Cardiff 8-15-201','18: H6MS Cardiff 8-15-2013','19: BHT20K Cardiff 8-16-2013','20: HiVHAC EM','21: 13 kW STMD Hall high-Isp (not available in open-source)','22: 13 kW STMD Hall high-thrust (not available in open-source)',
                       '23: NEXT TT11 High-Thrust','24: NEXT TT11 High-Isp','25: NEXT TT11 Expanded Throttle Table',
                       '26: 13 kW STMD Hall high-Isp 10-1-2014 (not available in open-source)','27: 13 kW STMD Hall medium-thrust 10-1-2014 (not available in open-source)','28: 13 kW STMD Hall high-thrust 10-1-2014 (not available in open-source)']

        self.cmbengine_type = wx.ComboBox(self, -1, choices = enginetypes, style=wx.CB_READONLY)

        self.lblnumber_of_engines = wx.StaticText(self, -1, "Number of thrusters")
        self.txtnumber_of_engines = wx.TextCtrl(self, -1, "number_of_engines")

        self.lblthrottle_logic_mode = wx.StaticText(self, -1, "Throttle logic mode")
        throttle_logic_types = ['maximum power use','maximum thrust','maximum Isp','maximum efficiency','maximum number of thrusters','minimum number of thrusters']
        self.cmbthrottle_logic_mode = wx.ComboBox(self, -1, choices = throttle_logic_types, style = wx.CB_READONLY)

        self.lblthrottle_sharpness = wx.StaticText(self, -1, "Throttle sharpness")
        self.txtthrottle_sharpness = wx.TextCtrl(self, -1, "throttle_sharpness")

        self.lblengine_duty_cycle = wx.StaticText(self, -1, "Thruster duty cycle")
        self.txtengine_duty_cycle = wx.TextCtrl(self, -1, "engine_duty_cycle")

        self.lblThrust = wx.StaticText(self, -1, "Electric thruster thrust (N)")
        self.txtThrust = wx.TextCtrl(self, -1, "Thrust")

        self.lblIspLT = wx.StaticText(self, -1, "Electric thruster Isp (s)")
        self.txtIspLT = wx.TextCtrl(self, -1, "IspLT")

        self.lblIspLT_minimum = wx.StaticText(self, -1, "Minimum Isp for VSI systems (s)")
        self.txtIspLT_minimum = wx.TextCtrl(self, -1, "IspLT_minimum")

        self.lbluser_defined_engine_efficiency = wx.StaticText(self, -1, "Thruster efficiency")
        self.txtuser_defined_engine_efficiency = wx.TextCtrl(self, -1, "user_defined_engine_efficiency")

        # custom thruster model: thrust and mass-flow-rate polynomials in input
        # power P, coefficients ordered constant term (1.0) up to P^6
        self.lblengine_coefficient_spacer = wx.StaticText(self, -1, "")
        self.lblengine_coefficient0 = wx.StaticText(self, -1, "1.0")
        self.lblengine_coefficient1 = wx.StaticText(self, -1, "P")
        self.lblengine_coefficient2 = wx.StaticText(self, -1, "P^2")
        self.lblengine_coefficient3 = wx.StaticText(self, -1, "P^3")
        self.lblengine_coefficient4 = wx.StaticText(self, -1, "P^4")
        self.lblengine_coefficient5 = wx.StaticText(self, -1, "P^5")
        self.lblengine_coefficient6 = wx.StaticText(self, -1, "P^6")

        self.lblengine_input_thrust_coefficients = wx.StaticText(self, -1, "Custom thrust coefficients (mN)")
        self.txtengine_input_thrust_coefficients0 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[0]")
        self.txtengine_input_thrust_coefficients1 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[1]")
        self.txtengine_input_thrust_coefficients2 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[2]")
        self.txtengine_input_thrust_coefficients3 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[3]")
        self.txtengine_input_thrust_coefficients4 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[4]")
        self.txtengine_input_thrust_coefficients5 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[5]")
        self.txtengine_input_thrust_coefficients6 = wx.TextCtrl(self, -1, "engine_input_thrust_coefficients[6]")

        self.lblengine_input_mass_flow_rate_coefficients = wx.StaticText(self, -1, "Custom mass flow rate coefficients (mg/s)")
        self.txtengine_input_mass_flow_rate_coefficients0 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[0]")
        self.txtengine_input_mass_flow_rate_coefficients1 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[1]")
        self.txtengine_input_mass_flow_rate_coefficients2 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[2]")
        self.txtengine_input_mass_flow_rate_coefficients3 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[3]")
        self.txtengine_input_mass_flow_rate_coefficients4 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[4]")
        self.txtengine_input_mass_flow_rate_coefficients5 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[5]")
        self.txtengine_input_mass_flow_rate_coefficients6 = wx.TextCtrl(self, -1, "engine_input_mass_flow_rate_coefficients[6]")

        self.lblengine_input_power_bounds = wx.StaticText(self, -1, "Thruster input power bounds")
        self.txtengine_input_power_bounds_lower = wx.TextCtrl(self, -1, "engine_input_power_bounds[0]")
        self.txtengine_input_power_bounds_upper = wx.TextCtrl(self, -1, "engine_input_power_bounds[1]")
        enginepowerbox = wx.BoxSizer(wx.HORIZONTAL)
        enginepowerbox.AddMany([self.txtengine_input_power_bounds_lower, self.txtengine_input_power_bounds_upper])

        customthrustergrid = wx.FlexGridSizer(3, 8, 5, 5)
        customthrustergrid.AddMany([self.lblengine_coefficient_spacer, self.lblengine_coefficient0, self.lblengine_coefficient1, self.lblengine_coefficient2, self.lblengine_coefficient3, self.lblengine_coefficient4, self.lblengine_coefficient5, self.lblengine_coefficient6,
                                    self.lblengine_input_thrust_coefficients, self.txtengine_input_thrust_coefficients0, self.txtengine_input_thrust_coefficients1, self.txtengine_input_thrust_coefficients2, self.txtengine_input_thrust_coefficients3, self.txtengine_input_thrust_coefficients4, self.txtengine_input_thrust_coefficients5, self.txtengine_input_thrust_coefficients6,
                                    self.lblengine_input_mass_flow_rate_coefficients, self.txtengine_input_mass_flow_rate_coefficients0, self.txtengine_input_mass_flow_rate_coefficients1, self.txtengine_input_mass_flow_rate_coefficients2, self.txtengine_input_mass_flow_rate_coefficients3, self.txtengine_input_mass_flow_rate_coefficients4, self.txtengine_input_mass_flow_rate_coefficients5, self.txtengine_input_mass_flow_rate_coefficients6])

        propulsiongrid.AddMany([self.lblIspChem, self.txtIspChem,
                                self.lblengine_type, self.cmbengine_type,
                                self.lblnumber_of_engines, self.txtnumber_of_engines,
                                self.lblthrottle_logic_mode, self.cmbthrottle_logic_mode,
                                self.lblthrottle_sharpness, self.txtthrottle_sharpness,
                                self.lblengine_duty_cycle, self.txtengine_duty_cycle,
                                self.lblThrust, self.txtThrust,
                                self.lblIspLT, self.txtIspLT,
                                self.lblIspLT_minimum, self.txtIspLT_minimum,
                                self.lbluser_defined_engine_efficiency, self.txtuser_defined_engine_efficiency,
                                self.lblengine_input_power_bounds, enginepowerbox])

        propulsionbox = wx.BoxSizer(wx.VERTICAL)
        propulsionbox.AddMany([propulsiongridtitle, propulsiongrid, customthrustergrid])

        #power
        powergrid = wx.FlexGridSizer(20,2,5,5)
        self.powergridtitle = wx.StaticText(self, -1, "Power options")

        self.lblpower_at_1_AU = wx.StaticText(self, -1, "Power at 1 AU (kW)")
        self.txtpower_at_1_AU = wx.TextCtrl(self, -1, "power_at_1_AU")

        self.lblpower_source_type = wx.StaticText(self, -1, "Power source type")
        power_source_choices = ['0: solar','1: radioisotope']
        self.cmbpower_source_type = wx.ComboBox(self, -1, choices=power_source_choices, style=wx.CB_READONLY)

        self.lblsolar_power_gamma = wx.StaticText(self, -1, "Solar power coefficients")
        self.txtsolar_power_gamma0 = wx.TextCtrl(self, -1, "solar_power_gamma[0]")
        self.txtsolar_power_gamma1 = wx.TextCtrl(self, -1, "solar_power_gamma[1]")
        self.txtsolar_power_gamma2 = wx.TextCtrl(self, -1, "solar_power_gamma[2]")
        self.txtsolar_power_gamma3 = wx.TextCtrl(self, -1, "solar_power_gamma[3]")
        self.txtsolar_power_gamma4 = wx.TextCtrl(self, -1, "solar_power_gamma[4]")
        solarpowerbox = wx.BoxSizer(wx.HORIZONTAL)
        solarpowerbox.AddMany([self.txtsolar_power_gamma0, self.txtsolar_power_gamma1, self.txtsolar_power_gamma2, self.txtsolar_power_gamma3, self.txtsolar_power_gamma4])

        self.lblspacecraft_power_model_type = wx.StaticText(self, -1, "Spacecraft power model type")
        power_model_choices = ['0: P_sc = A + B/r + C/r^2','1: P_sc = A if P > A, A + B(C - P) otherwise']
        self.cmbspacecraft_power_model_type = wx.ComboBox(self, -1, choices=power_model_choices, style = wx.CB_READONLY)

        self.lblspacecraft_power_coefficients = wx.StaticText(self, -1, "Spacecraft power coefficients")
        self.txtspacecraft_power_coefficients0 = wx.TextCtrl(self, -1, "spacecraft_power_coefficients[0]")
        self.txtspacecraft_power_coefficients1 = wx.TextCtrl(self, -1, "spacecraft_power_coefficients[1]")
        self.txtspacecraft_power_coefficients2 = wx.TextCtrl(self, -1, "spacecraft_power_coefficients[2]")
        spacecraftpowerbox = wx.BoxSizer(wx.HORIZONTAL)
        spacecraftpowerbox.AddMany([self.txtspacecraft_power_coefficients0, self.txtspacecraft_power_coefficients1, self.txtspacecraft_power_coefficients2])

        self.lblpower_decay_rate = wx.StaticText(self, -1, "Power decay rate (fraction per year)")
        self.txtpower_decay_rate = wx.TextCtrl(self, -1, "power_decay_rate")

        powergrid.AddMany([self.lblpower_at_1_AU, self.txtpower_at_1_AU,
                           self.lblpower_source_type, self.cmbpower_source_type,
                           self.lblsolar_power_gamma, solarpowerbox,
                           self.lblspacecraft_power_model_type, self.cmbspacecraft_power_model_type,
                           self.lblspacecraft_power_coefficients, spacecraftpowerbox,
                           self.lblpower_decay_rate, self.txtpower_decay_rate])

        powerbox = wx.BoxSizer(wx.VERTICAL)
        powerbox.AddMany([self.powergridtitle, powergrid])

        #now tie everything together: option groups stacked on the left,
        #constraints column on the right
        leftvertsizer = wx.BoxSizer(wx.VERTICAL)
        leftvertsizer.AddMany([spacecraftbox, propulsionbox, powerbox])

        self.mainbox = wx.BoxSizer(wx.HORIZONTAL)
        self.mainbox.AddMany([leftvertsizer, constraintsbox])

        # bold the four section titles
        font = self.GetFont()
        font.SetWeight(wx.FONTWEIGHT_BOLD)
        spacecraftgridtitle.SetFont(font)
        self.powergridtitle.SetFont(font)
        constraintsgridtitle.SetFont(font)
        propulsiongridtitle.SetFont(font)

        self.SetSizer(self.mainbox)
        self.SetupScrolling()
class JourneyOptionsPanel(wx.lib.scrolledpanel.ScrolledPanel):
    """Scrolled panel for per-journey options: a journey list with add/delete/
    reorder buttons, a grid of journey option fields, and StaticBox groups for
    custom departure and arrival orbital elements.

    Widgets follow the lbl*/txt*/chk*/cmb* naming convention; TextCtrl default
    strings are placeholders naming the option variable each field edits.
    Button/listbox event bindings are not made here -- presumably wired up
    elsewhere (TODO confirm).
    """

    def __init__(self, parent):
        wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent)

        # journey selection list plus management buttons
        self.JourneyList = []

        self.JourneySelectBox = wx.ListBox(self, -1, choices=self.JourneyList, size=(300,200), style=wx.LB_SINGLE)
        self.btnAddNewJourney = wx.Button(self, -1, "New Journey", size=(200,-1))
        self.btnDeleteJourney = wx.Button(self, -1, "Delete Journey", size=(200,-1))
        self.btnMoveJourneyUp = wx.Button(self, -1, "Move Journey Up", size=(200,-1))
        self.btnMoveJourneyDown = wx.Button(self, -1, "Move Journey Down", size=(200,-1))

        buttonstacksizer = wx.BoxSizer(wx.VERTICAL)
        buttonstacksizer.AddMany([self.btnAddNewJourney, self.btnDeleteJourney, self.btnMoveJourneyUp, self.btnMoveJourneyDown])

        JourneySelectionSizer = wx.BoxSizer(wx.HORIZONTAL)
        JourneySelectionSizer.Add(self.JourneySelectBox)
        JourneySelectionSizer.AddSpacer(5)
        JourneySelectionSizer.Add(buttonstacksizer)

        # per-journey option fields; "..." buttons open pickers for the
        # adjacent text field
        self.lbljourney_names = wx.StaticText(self, -1, "Journey name")
        self.txtjourney_names = wx.TextCtrl(self, -1, "journey_names", size=(300,-1))

        self.lbljourney_central_body = wx.StaticText(self, -1, "Central body")
        self.txtjourney_central_body = wx.TextCtrl(self, -1, "journey_central_body")
        self.btnjourney_central_body = wx.Button(self, -1, "...")
        journey_central_body_box = wx.BoxSizer(wx.HORIZONTAL)
        journey_central_body_box.Add(self.txtjourney_central_body)
        journey_central_body_box.AddSpacer(5)
        journey_central_body_box.Add(self.btnjourney_central_body)

        self.lbldestination_list = wx.StaticText(self, -1, "Destination list")
        self.txtdestination_list = wx.TextCtrl(self, -1, "destination_list")
        self.btndestination_list = wx.Button(self, -1, "...")
        destination_list_box = wx.BoxSizer(wx.HORIZONTAL)
        destination_list_box.Add(self.txtdestination_list)
        destination_list_box.AddSpacer(5)
        destination_list_box.Add(self.btndestination_list)

        self.lbljourney_starting_mass_increment = wx.StaticText(self, -1, "Starting mass increment (kg)")
        self.txtjourney_starting_mass_increment = wx.TextCtrl(self, -1, "journey_starting_mass_increment")

        self.lbljourney_variable_mass_increment = wx.StaticText(self, -1, "Variable mass increment")
        self.chkjourney_variable_mass_increment = wx.CheckBox(self, -1)

        self.lbljourney_wait_time_bounds = wx.StaticText(self, -1, "Wait time bounds")
        self.txtjourney_wait_time_bounds_lower = wx.TextCtrl(self, -1, "journey_wait_time_bounds[0]")
        self.txtjourney_wait_time_bounds_upper = wx.TextCtrl(self, -1, "journey_wait_time_bounds[1]")
        wait_time_sizer = wx.BoxSizer(wx.HORIZONTAL)
        wait_time_sizer.AddMany([self.txtjourney_wait_time_bounds_lower, self.txtjourney_wait_time_bounds_upper])

        self.lbljourney_timebounded = wx.StaticText(self, -1, "Journey time bounds")
        journey_time_bounds_choices = ['unbounded','bounded flight time','bounded arrival date','bounded aggregate flight time']
        self.cmbjourney_timebounded = wx.ComboBox(self, -1, choices=journey_time_bounds_choices, style=wx.CB_READONLY)

        self.lbljourney_flight_time_bounds = wx.StaticText(self, -1, "Journey flight time bounds")
        self.txtjourney_flight_time_bounds_lower = wx.TextCtrl(self, -1, "journey_flight_time_bounds[0]")
        self.txtjourney_flight_time_bounds_upper = wx.TextCtrl(self, -1, "journey_flight_time_bounds[1]")
        flight_time_sizer = wx.BoxSizer(wx.HORIZONTAL)
        flight_time_sizer.AddMany([self.txtjourney_flight_time_bounds_lower, self.txtjourney_flight_time_bounds_upper])

        # arrival date bounds: each text field paired with a calendar picker
        self.lbljourney_arrival_date_bounds = wx.StaticText(self, -1, "Journey arrival date bounds")
        self.txtjourney_arrival_date_bounds_lower = wx.TextCtrl(self, -1, "journey_arrival_date_bounds[0]")
        self.txtjourney_arrival_date_bounds_upper = wx.TextCtrl(self, -1, "journey_arrival_date_bounds[1]")
        self.ArrivalDateLowerCalendar = wx.calendar.CalendarCtrl(self, -1)
        self.ArrivalDateUpperCalendar = wx.calendar.CalendarCtrl(self, -1)
        arrival_date_sizer = wx.BoxSizer(wx.HORIZONTAL)
        arrival_date_sizer.AddMany([self.txtjourney_arrival_date_bounds_lower, self.ArrivalDateLowerCalendar, self.txtjourney_arrival_date_bounds_upper, self.ArrivalDateUpperCalendar])

        self.lbljourney_initial_impulse_bounds = wx.StaticText(self, -1, "Journey initial impulse bounds")
        self.txtjourney_initial_impulse_bounds_lower = wx.TextCtrl(self, -1, "journey_initial_impulse_bounds[0]")
        self.txtjourney_initial_impulse_bounds_upper = wx.TextCtrl(self, -1, "journey_initial_impulse_bounds[1]")
        initial_impulse_sizer = wx.BoxSizer(wx.HORIZONTAL)
        initial_impulse_sizer.AddMany([self.txtjourney_initial_impulse_bounds_lower, self.txtjourney_initial_impulse_bounds_upper])

        self.lbljourney_departure_type = wx.StaticText(self, -1, "Journey departure type")
        journey_departure_type_choices = ['0: launch or direct insertion','1: depart from parking orbit','2: free direct departure',
                                          '3: flyby','4: flyby with fixed v-infinity-out','5: Spiral-out from circular orbit','6: zero-turn flyby (for small bodies)']
        self.cmbjourney_departure_type = wx.ComboBox(self, -1, choices=journey_departure_type_choices, style=wx.CB_READONLY)

        self.lbljourney_initial_velocity = wx.StaticText(self, -1, "Journey initial velocity vector")
        self.txtjourney_initial_velocity0 = wx.TextCtrl(self, -1, "journey_initial_velocity[0]")
        self.txtjourney_initial_velocity1 = wx.TextCtrl(self, -1, "journey_initial_velocity[1]")
        self.txtjourney_initial_velocity2 = wx.TextCtrl(self, -1, "journey_initial_velocity[2]")
        journey_initial_velocity_box = wx.BoxSizer(wx.HORIZONTAL)
        journey_initial_velocity_box.AddMany([self.txtjourney_initial_velocity0, self.txtjourney_initial_velocity1, self.txtjourney_initial_velocity2])

        self.lbljourney_escape_spiral_starting_radius = wx.StaticText(self, -1, "Orbital radius for beginning of escape spiral (km)")
        self.txtjourney_escape_spiral_starting_radius = wx.TextCtrl(self, -1, "journey_escape_spiral_starting_radius")

        self.lbljourney_maximum_DSM_magnitude_flag = wx.StaticText(self, -1, "Enable journey DSM magnitude constraint?")
        self.chkjourney_maximum_DSM_magnitude_flag = wx.CheckBox(self, -1)
        self.lbljourney_maximum_DSM_magnitude = wx.StaticText(self, -1, "Journey maximum DSM magnitude (km/s)")
        self.txtjourney_maximum_DSM_magnitude = wx.TextCtrl(self, -1, "journey_maximum_DSM_magnitude")

        self.lbljourney_arrival_type = wx.StaticText(self, -1, "Journey arrival type")
        journey_arrival_type_choices = ['0: insertion into parking orbit (use chemical Isp)','1: rendezvous (use chemical Isp)','2: intercept with bounded V_infinity',
                                        '3: low-thrust rendezvous (does not work if terminal phase is not low-thrust)','4: match final v-infinity vector',
                                        '5: match final v-infinity vector (low-thrust)','6: escape (E = 0)','7: capture spiral']
        self.cmbjourney_arrival_type = wx.ComboBox(self, -1, choices=journey_arrival_type_choices, style=wx.CB_READONLY)

        self.lbljourney_capture_spiral_final_radius = wx.StaticText(self, -1, "Orbital radius for end of capture spiral (km)")
        self.txtjourney_capture_spiral_final_radius = wx.TextCtrl(self, -1, "journey_capture_spiral_final_radius")

        self.lbljourney_final_velocity = wx.StaticText(self, -1, "Journey final velocity vector")
        self.txtjourney_final_velocity0 = wx.TextCtrl(self, -1, "journey_final_velocity[0]")
        self.txtjourney_final_velocity1 = wx.TextCtrl(self, -1, "journey_final_velocity[1]")
        self.txtjourney_final_velocity2 = wx.TextCtrl(self, -1, "journey_final_velocity[2]")
        journey_final_velocity_box = wx.BoxSizer(wx.HORIZONTAL)
        journey_final_velocity_box.AddMany([self.txtjourney_final_velocity0, self.txtjourney_final_velocity1, self.txtjourney_final_velocity2])

        self.lbljourney_arrival_declination_constraint_flag = wx.StaticText(self, -1, "Apply arrival declination constraint?")
        self.chkjourney_arrival_declination_constraint_flag = wx.CheckBox(self, -1)
        self.lbljourney_arrival_declination_bounds = wx.StaticText(self, -1, "Arrival Declination bounds")
        self.txtjourney_arrival_declination_bounds_lower = wx.TextCtrl(self, -1, "journey_arrival_declination_bounds[0]")
        self.txtjourney_arrival_declination_bounds_upper = wx.TextCtrl(self, -1, "journey_arrival_declination_bounds[1]")
        declination_bounds_box = wx.BoxSizer(wx.HORIZONTAL)
        declination_bounds_box.AddMany([self.txtjourney_arrival_declination_bounds_lower, self.txtjourney_arrival_declination_bounds_upper])

        self.lblsequence = wx.StaticText(self, -1, "Flyby sequence")
        self.txtsequence = wx.TextCtrl(self, -1, "sequence", size=(300,60), style=wx.TE_MULTILINE)
        self.btnsequence = wx.Button(self, -1, "...")
        sequence_box = wx.BoxSizer(wx.HORIZONTAL)
        sequence_box.Add(self.txtsequence)
        sequence_box.AddSpacer(5)
        sequence_box.Add(self.btnsequence)

        self.lbljourney_perturbation_bodies = wx.StaticText(self, -1, "Perturbation_bodies")
        self.txtjourney_perturbation_bodies = wx.TextCtrl(self, -1, "journey_perturbation_bodies", size=(300,-1))
        self.btnjourney_perturbation_bodies = wx.Button(self, -1, "...")
        journey_perturbation_bodies_box = wx.BoxSizer(wx.HORIZONTAL)
        journey_perturbation_bodies_box.Add(self.txtjourney_perturbation_bodies)
        journey_perturbation_bodies_box.AddSpacer(5)
        journey_perturbation_bodies_box.Add(self.btnjourney_perturbation_bodies)

        # two-column grid of label/widget pairs for all journey fields
        JourneyInformationGrid = wx.FlexGridSizer(40,2,5,5)
        JourneyInformationGrid.AddMany([self.lbljourney_names, self.txtjourney_names,
                                        self.lbljourney_central_body, journey_central_body_box,
                                        self.lbldestination_list, destination_list_box,
                                        self.lbljourney_starting_mass_increment, self.txtjourney_starting_mass_increment,
                                        self.lbljourney_variable_mass_increment, self.chkjourney_variable_mass_increment,
                                        self.lbljourney_wait_time_bounds, wait_time_sizer,
                                        self.lbljourney_timebounded, self.cmbjourney_timebounded,
                                        self.lbljourney_flight_time_bounds, flight_time_sizer,
                                        self.lbljourney_arrival_date_bounds, arrival_date_sizer,
                                        self.lbljourney_initial_impulse_bounds, initial_impulse_sizer,
                                        self.lbljourney_departure_type, self.cmbjourney_departure_type,
                                        self.lbljourney_escape_spiral_starting_radius, self.txtjourney_escape_spiral_starting_radius,
                                        self.lbljourney_initial_velocity, journey_initial_velocity_box,
                                        self.lbljourney_maximum_DSM_magnitude_flag, self.chkjourney_maximum_DSM_magnitude_flag,
                                        self.lbljourney_maximum_DSM_magnitude, self.txtjourney_maximum_DSM_magnitude,
                                        self.lbljourney_arrival_type, self.cmbjourney_arrival_type,
                                        self.lbljourney_capture_spiral_final_radius, self.txtjourney_capture_spiral_final_radius,
                                        self.lbljourney_final_velocity, journey_final_velocity_box,
                                        self.lbljourney_arrival_declination_constraint_flag, self.chkjourney_arrival_declination_constraint_flag,
                                        self.lbljourney_arrival_declination_bounds, declination_bounds_box,
                                        self.lblsequence, sequence_box,
                                        self.lbljourney_perturbation_bodies, journey_perturbation_bodies_box])

        JourneyInformationStacker = wx.BoxSizer(wx.VERTICAL)
        JourneyInformationStacker.AddMany([JourneySelectionSizer, JourneyInformationGrid])

        #custom departure elements
        # each orbital element row: vary-checkbox, value, lower bound (txt*0),
        # upper bound (txt*1)
        self.boxjourney_departure_elements = wx.StaticBox(self, -1, "Journey departure elements")
        self.lbljourney_departure_elements_type= wx.StaticText(self, -1, "Journey departure elements type")
        journey_departure_elements_type_choices = ['0: inertial', '1: COE']
        self.cmbjourney_departure_elements_type = wx.ComboBox(self, -1, choices=journey_departure_elements_type_choices, style=wx.CB_READONLY)
        departure_elements_type_box = wx.BoxSizer(wx.HORIZONTAL)
        departure_elements_type_box.Add(self.lbljourney_departure_elements_type)
        departure_elements_type_box.AddSpacer(5)
        departure_elements_type_box.Add(self.cmbjourney_departure_elements_type)

        empty_departure_cell = wx.StaticText(self, -1, "")
        self.lblvarydepartureelements = wx.StaticText(self, -1, "Vary?")
        self.lbldepartureelementsvalue = wx.StaticText(self, -1, "Value")
        self.lbldepartureelementslower = wx.StaticText(self, -1, "Lower bound")
        self.lbldepartureelementsupper = wx.StaticText(self, -1, "Upper bound")
        self.lblSMA_departure = wx.StaticText(self, -1, "SMA")
        self.lblECC_departure = wx.StaticText(self, -1, "ECC")
        self.lblINC_departure = wx.StaticText(self, -1, "INC")
        self.lblRAAN_departure = wx.StaticText(self, -1, "RAAN")
        self.lblAOP_departure = wx.StaticText(self, -1, "AOP")
        self.lblMA_departure = wx.StaticText(self, -1, "MA")
        self.chkSMA_departure = wx.CheckBox(self, -1)
        self.chkECC_departure = wx.CheckBox(self, -1)
        self.chkINC_departure = wx.CheckBox(self, -1)
        self.chkRAAN_departure = wx.CheckBox(self, -1)
        self.chkAOP_departure = wx.CheckBox(self, -1)
        self.chkMA_departure = wx.CheckBox(self, -1)
        self.txtSMA_departure = wx.TextCtrl(self, -1, "SMA_val")
        self.txtECC_departure = wx.TextCtrl(self, -1, "ECC_val")
        self.txtINC_departure = wx.TextCtrl(self, -1, "INC_val")
        self.txtRAAN_departure = wx.TextCtrl(self, -1, "RAAN_val")
        self.txtAOP_departure = wx.TextCtrl(self, -1, "AOP_val")
        self.txtMA_departure = wx.TextCtrl(self, -1, "MA_val")
        self.txtSMA_departure0 = wx.TextCtrl(self, -1, "SMA_val0")
        self.txtECC_departure0 = wx.TextCtrl(self, -1, "ECC_val0")
        self.txtINC_departure0 = wx.TextCtrl(self, -1, "INC_val0")
        self.txtRAAN_departure0 = wx.TextCtrl(self, -1, "RAAN_val0")
        self.txtAOP_departure0 = wx.TextCtrl(self, -1, "AOP_val0")
        self.txtMA_departure0 = wx.TextCtrl(self, -1, "MA_val0")
        self.txtSMA_departure1 = wx.TextCtrl(self, -1, "SMA_val1")
        self.txtECC_departure1 = wx.TextCtrl(self, -1, "ECC_val1")
        self.txtINC_departure1 = wx.TextCtrl(self, -1, "INC_val1")
        self.txtRAAN_departure1 = wx.TextCtrl(self, -1, "RAAN_val1")
        self.txtAOP_departure1 = wx.TextCtrl(self, -1, "AOP_val1")
        self.txtMA_departure1 = wx.TextCtrl(self, -1, "MA_val1")
        DepartureElementsSizer = wx.FlexGridSizer(14,5,5,5)
        DepartureElementsSizer.AddMany([empty_departure_cell, self.lblvarydepartureelements, self.lbldepartureelementsvalue, self.lbldepartureelementslower, self.lbldepartureelementsupper,
                                        self.lblSMA_departure, self.chkSMA_departure, self.txtSMA_departure, self.txtSMA_departure0, self.txtSMA_departure1,
                                        self.lblECC_departure, self.chkECC_departure, self.txtECC_departure, self.txtECC_departure0, self.txtECC_departure1,
                                        self.lblINC_departure, self.chkINC_departure, self.txtINC_departure, self.txtINC_departure0, self.txtINC_departure1,
                                        self.lblRAAN_departure, self.chkRAAN_departure, self.txtRAAN_departure, self.txtRAAN_departure0, self.txtRAAN_departure1,
                                        self.lblAOP_departure, self.chkAOP_departure, self.txtAOP_departure, self.txtAOP_departure0, self.txtAOP_departure1,
                                        self.lblMA_departure, self.chkMA_departure, self.txtMA_departure, self.txtMA_departure0, self.txtMA_departure1])
        self.DepartureElementsBox = wx.StaticBoxSizer(self.boxjourney_departure_elements, wx.VERTICAL)
        self.DepartureElementsBox.AddMany([departure_elements_type_box, DepartureElementsSizer])

        # bold the departure-elements group title
        font = self.GetFont()
        font.SetWeight(wx.FONTWEIGHT_BOLD)
        self.boxjourney_departure_elements.SetFont(font)


        #custom arrival elements
        # mirror of the departure-elements group
        self.boxjourney_arrival_elements = wx.StaticBox(self, -1, "Journey arrival elements")
        self.lbljourney_arrival_elements_type= wx.StaticText(self, -1, "Journey arrival elements type")
        journey_arrival_elements_type_choices = ['0: inertial', '1: COE']
        self.cmbjourney_arrival_elements_type = wx.ComboBox(self, -1, choices=journey_arrival_elements_type_choices, style=wx.CB_READONLY)
        arrival_elements_type_box = wx.BoxSizer(wx.HORIZONTAL)
        arrival_elements_type_box.Add(self.lbljourney_arrival_elements_type)
        arrival_elements_type_box.AddSpacer(5)
        arrival_elements_type_box.Add(self.cmbjourney_arrival_elements_type)

        empty_arrival_cell = wx.StaticText(self, -1, "")
        self.lblvaryarrivalelements = wx.StaticText(self, -1, "Vary?")
        self.lblarrivalelementsvalue = wx.StaticText(self, -1, "Value")
        self.lblarrivalelementslower = wx.StaticText(self, -1, "Lower bound")
        self.lblarrivalelementsupper = wx.StaticText(self, -1, "Upper bound")
        self.lblSMA_arrival = wx.StaticText(self, -1, "SMA")
        self.lblECC_arrival = wx.StaticText(self, -1, "ECC")
        self.lblINC_arrival = wx.StaticText(self, -1, "INC")
        self.lblRAAN_arrival = wx.StaticText(self, -1, "RAAN")
        self.lblAOP_arrival = wx.StaticText(self, -1, "AOP")
        self.lblMA_arrival = wx.StaticText(self, -1, "MA")
        self.chkSMA_arrival = wx.CheckBox(self, -1)
        self.chkECC_arrival = wx.CheckBox(self, -1)
        self.chkINC_arrival = wx.CheckBox(self, -1)
        self.chkRAAN_arrival = wx.CheckBox(self, -1)
        self.chkAOP_arrival = wx.CheckBox(self, -1)
        self.chkMA_arrival = wx.CheckBox(self, -1)
        self.txtSMA_arrival = wx.TextCtrl(self, -1, "SMA_val")
        self.txtECC_arrival = wx.TextCtrl(self, -1, "ECC_val")
        self.txtINC_arrival = wx.TextCtrl(self, -1, "INC_val")
        self.txtRAAN_arrival = wx.TextCtrl(self, -1, "RAAN_val")
        self.txtAOP_arrival = wx.TextCtrl(self, -1, "AOP_val")
        self.txtMA_arrival = wx.TextCtrl(self, -1, "MA_val")
        self.txtSMA_arrival0 = wx.TextCtrl(self, -1, "SMA_val0")
        self.txtECC_arrival0 = wx.TextCtrl(self, -1, "ECC_val0")
        self.txtINC_arrival0 = wx.TextCtrl(self, -1, "INC_val0")
        self.txtRAAN_arrival0 = wx.TextCtrl(self, -1, "RAAN_val0")
        self.txtAOP_arrival0 = wx.TextCtrl(self, -1, "AOP_val0")
        self.txtMA_arrival0 = wx.TextCtrl(self, -1, "MA_val0")
        self.txtSMA_arrival1 = wx.TextCtrl(self, -1, "SMA_val1")
        self.txtECC_arrival1 = wx.TextCtrl(self, -1, "ECC_val1")
        self.txtINC_arrival1 = wx.TextCtrl(self, -1, "INC_val1")
        self.txtRAAN_arrival1 = wx.TextCtrl(self, -1, "RAAN_val1")
        self.txtAOP_arrival1 = wx.TextCtrl(self, -1, "AOP_val1")
        self.txtMA_arrival1 = wx.TextCtrl(self, -1, "MA_val1")
        ArrivalElementsSizer = wx.FlexGridSizer(14,5,5,5)
        ArrivalElementsSizer.AddMany([empty_arrival_cell, self.lblvaryarrivalelements, self.lblarrivalelementsvalue, self.lblarrivalelementslower, self.lblarrivalelementsupper,
                                      self.lblSMA_arrival, self.chkSMA_arrival, self.txtSMA_arrival, self.txtSMA_arrival0, self.txtSMA_arrival1,
                                      self.lblECC_arrival, self.chkECC_arrival, self.txtECC_arrival, self.txtECC_arrival0, self.txtECC_arrival1,
                                      self.lblINC_arrival, self.chkINC_arrival, self.txtINC_arrival, self.txtINC_arrival0, self.txtINC_arrival1,
                                      self.lblRAAN_arrival, self.chkRAAN_arrival, self.txtRAAN_arrival, self.txtRAAN_arrival0, self.txtRAAN_arrival1,
                                      self.lblAOP_arrival, self.chkAOP_arrival, self.txtAOP_arrival, self.txtAOP_arrival0, self.txtAOP_arrival1,
                                      self.lblMA_arrival, self.chkMA_arrival, self.txtMA_arrival, self.txtMA_arrival0, self.txtMA_arrival1])
        self.ArrivalElementsBox = wx.StaticBoxSizer(self.boxjourney_arrival_elements, wx.VERTICAL)
        self.ArrivalElementsBox.AddMany([arrival_elements_type_box, ArrivalElementsSizer])

        # bold the arrival-elements group title
        font = self.GetFont()
        font.SetWeight(wx.FONTWEIGHT_BOLD)
        self.boxjourney_arrival_elements.SetFont(font)

        # journey fields on the left, departure/arrival element groups stacked
        # on the right
        ElementsStacker = wx.BoxSizer(wx.VERTICAL)
        ElementsStacker.AddMany([self.DepartureElementsBox, self.ArrivalElementsBox])

        self.mainbox = wx.BoxSizer(wx.HORIZONTAL)
        self.mainbox.AddMany([JourneyInformationStacker, ElementsStacker])
        self.SetSizer(self.mainbox)
        self.SetupScrolling()
point finder?\")\r\n self.chkACE_feasible_point_finder = wx.CheckBox(self, -1)\r\n \r\n self.lblMBH_max_not_improve = wx.StaticText(self, -1, \"MBH Impatience\")\r\n self.txtMBH_max_not_improve = wx.TextCtrl(self, -1, \"MBH_max_not_improve\")\r\n \r\n self.lblMBH_max_trials = wx.StaticText(self, -1, \"Maximum number of innerloop trials\")\r\n self.txtMBH_max_trials = wx.TextCtrl(self, -1, \"MBH_max_trials\")\r\n \r\n self.lblMBH_max_run_time = wx.StaticText(self, -1, \"Maximum run-time\")\r\n self.txtMBH_max_run_time = wx.TextCtrl(self, -1, \"MBH_max_run_time\")\r\n \r\n self.lblMBH_max_step_size = wx.StaticText(self, -1, \"MBH maximum perturbation size\")\r\n self.txtMBH_max_step_size = wx.TextCtrl(self, -1, \"MBH_max_step_size\")\r\n\r\n self.lblMBH_hop_distribution = wx.StaticText(self, -1, \"MBH hop probability distribution\")\r\n hop_distribution_choices = [\"Uniform\",\"Cauchy\",\"Pareto\",\"Gaussian\"]\r\n self.cmbMBH_hop_distribution = wx.ComboBox(self, -1, choices = hop_distribution_choices, style = wx.CB_READONLY)\r\n\r\n self.lblMBH_Pareto_alpha = wx.StaticText(self, -1, \"MBH Pareto distribution alpha\")\r\n self.txtMBH_Pareto_alpha = wx.TextCtrl(self, -1, \"MBH_Pareto_alpha\")\r\n\r\n self.lblMBH_time_hop_probability = wx.StaticText(self, -1, \"Probability of MBH time hop\")\r\n self.txtMBH_time_hop_probability = wx.TextCtrl(self, -1, \"MBH_time_hop_probability\")\r\n \r\n self.lblsnopt_feasibility_tolerance = wx.StaticText(self, -1, \"Feasibility tolerance\")\r\n self.txtsnopt_feasibility_tolerance = wx.TextCtrl(self, -1, \"snopt_feasibility_tolerance\")\r\n \r\n self.lblsnopt_major_iterations = wx.StaticText(self, -1, \"SNOPT major iterations limit\")\r\n self.txtsnopt_major_iterations = wx.TextCtrl(self, -1, \"snopt_major_iterations\")\r\n \r\n self.lblsnopt_max_run_time = wx.StaticText(self, -1, \"SNOPT maximum run time\")\r\n self.txtsnopt_max_run_time = wx.TextCtrl(self, -1, \"snopt_max_run_time\")\r\n \r\n self.lblderivative_type = 
wx.StaticText(self, -1, \"Derivative calculation method\")\r\n derivativechoices = [\"Finite Differencing\",\"Analytical flybys and objective function\",\"Analytical all but time\",\"All but current phase flight time derivatives\",\"Fully analytical (experimental)\"]\r\n self.cmbderivative_type = wx.ComboBox(self, -1, choices = derivativechoices, style = wx.CB_READONLY)\r\n \r\n self.lblcheck_derivatives = wx.StaticText(self, -1, \"Check derivatives via finite differencing?\")\r\n self.chkcheck_derivatives = wx.CheckBox(self, -1)\r\n \r\n self.lblseed_MBH = wx.StaticText(self, -1, \"Seed MBH?\")\r\n self.chkseed_MBH = wx.CheckBox(self, -1)\r\n\r\n self.lblinitial_guess_control_coordinate_system = wx.StaticText(self, -1, \"Initial guess control coordinate system\")\r\n control_coordinate_choices = ['Cartesian','Polar']\r\n self.cmbinitial_guess_control_coordinate_system = wx.ComboBox(self, -1, choices = control_coordinate_choices, style=wx.CB_READONLY)\r\n \r\n self.lblinterpolate_initial_guess = wx.StaticText(self, -1, \"Interpolate initial guess?\")\r\n self.chkinterpolate_initial_guess = wx.CheckBox(self, -1)\r\n \r\n self.lblinitial_guess_num_timesteps = wx.StaticText(self, -1, \"Number of timesteps used to create initial guess\")\r\n self.txtinitial_guess_num_timesteps = wx.TextCtrl(self, -1, \"initial_guess_num_timesteps\")\r\n \r\n self.lblinitial_guess_step_size_distribution = wx.StaticText(self, -1, \"Initial guess step size distribution\")\r\n initialguessdistributionchoices = [\"Uniform\",\"Gaussian\",\"Cauchy\"]\r\n self.cmbinitial_guess_step_size_distribution = wx.ComboBox(self, -1, choices = initialguessdistributionchoices, style=wx.CB_READONLY)\r\n\r\n self.lblinitial_guess_step_size_stdv_or_scale = wx.StaticText(self, -1, \"Initial guess scale width/standard deviation\")\r\n self.txtinitial_guess_step_size_stdv_or_scale = wx.TextCtrl(self, -1, \"initial_guess_step_size_stdv_or_scale\")\r\n\r\n self.lblMBH_zero_control_initial_guess = 
wx.StaticText(self, -1, \"Zero-control initial guess\")\r\n MBH_zero_control_initial_guess_options = ['do not use','zero-control for resets, random perturbations for hops','always use zero-control guess except when seeded']\r\n self.cmbMBH_zero_control_initial_guess = wx.ComboBox(self, -1, choices = MBH_zero_control_initial_guess_options, style=wx.CB_READONLY)\r\n \r\n innerloopgrid.AddMany( [self.lblInnerLoopSolver, self.cmbInnerLoopSolver,\r\n self.lblNLP_solver_type, self.cmbNLP_solver_type,\r\n self.lblNLP_solver_mode, self.cmbNLP_solver_mode,\r\n self.lblquiet_NLP, self.chkquiet_NLP,\r\n self.lblquiet_MBH, self.chkquiet_MBH,\r\n self.lblMBH_two_step, self.chkMBH_two_step,\r\n self.lblFD_stepsize, self.txtFD_stepsize,\r\n self.lblFD_stepsize_coarse, self.txtFD_stepsize_coarse,\r\n self.lblACE_feasible_point_finder, self.chkACE_feasible_point_finder,\r\n self.lblMBH_max_not_improve, self.txtMBH_max_not_improve,\r\n self.lblMBH_max_trials, self.txtMBH_max_trials,\r\n self.lblMBH_max_run_time, self.txtMBH_max_run_time,\r\n self.lblMBH_hop_distribution, self.cmbMBH_hop_distribution,\r\n self.lblMBH_max_step_size, self.txtMBH_max_step_size,\r\n self.lblMBH_Pareto_alpha, self.txtMBH_Pareto_alpha,\r\n self.lblMBH_time_hop_probability, self.txtMBH_time_hop_probability,\r\n self.lblsnopt_feasibility_tolerance, self.txtsnopt_feasibility_tolerance,\r\n self.lblsnopt_major_iterations, self.txtsnopt_major_iterations,\r\n self.lblsnopt_max_run_time, self.txtsnopt_max_run_time,\r\n self.lblderivative_type, self.cmbderivative_type,\r\n self.lblcheck_derivatives, self.chkcheck_derivatives,\r\n self.lblseed_MBH, self.chkseed_MBH,\r\n self.lblinitial_guess_control_coordinate_system, self.cmbinitial_guess_control_coordinate_system,\r\n self.lblinterpolate_initial_guess, self.chkinterpolate_initial_guess,\r\n self.lblinitial_guess_num_timesteps, self.txtinitial_guess_num_timesteps,\r\n self.lblinitial_guess_step_size_distribution, self.cmbinitial_guess_step_size_distribution,\r\n 
self.lblinitial_guess_step_size_stdv_or_scale, self.txtinitial_guess_step_size_stdv_or_scale,\r\n self.lblMBH_zero_control_initial_guess, self.cmbMBH_zero_control_initial_guess])\r\n \r\n outerloopgrid = wx.GridSizer(12,2,0,0)\r\n \r\n self.lblrun_outerloop = wx.StaticText(self, -1, \"Outer-Loop Solver\")\r\n outerloop_choices = [\"None\",\"Genetic Algorithm\"]\r\n self.cmbrun_outerloop = wx.ComboBox(self, -1, choices=outerloop_choices, style = wx.CB_READONLY)\r\n \r\n self.lblouterloop_popsize = wx.StaticText(self, -1, \"Population size\")\r\n self.txtouterloop_popsize = wx.TextCtrl(self, -1, \"outerloop_popsize\")\r\n \r\n self.lblouterloop_genmax = wx.StaticText(self, -1, \"Maximum number of generations\")\r\n self.txtouterloop_genmax = wx.TextCtrl(self, -1, \"outerloop_genmax\")\r\n \r\n self.lblouterloop_tournamentsize = wx.StaticText(self, -1, \"Tournament size\")\r\n self.txtouterloop_tournamentsize = wx.TextCtrl(self, -1, \"outerloop_tournamentsize\")\r\n \r\n self.lblouterloop_CR = wx.StaticText(self, -1, \"Crossover ratio\")\r\n self.txtouterloop_CR = wx.TextCtrl(self, -1, \"outerloop_CR\")\r\n \r\n self.lblouterloop_mu = wx.StaticText(self, -1, \"Mutation rate\")\r\n self.txtouterloop_mu = wx.TextCtrl(self, -1, \"outerloop_mu\")\r\n \r\n self.lblouterloop_stallmax = wx.StaticText(self, -1, \"Maximum stall duration\")\r\n self.txtouterloop_stallmax = wx.TextCtrl(self, -1, \"outerloop_stallmax\")\r\n \r\n self.lblouterloop_tolfit = wx.StaticText(self, -1, \"Fitness tolerance\")\r\n self.txtouterloop_tolfit = wx.TextCtrl(self, -1, \"outerloop_tolfit\")\r\n \r\n self.lblouterloop_ntrials = wx.StaticText(self, -1, \"Number of trials\")\r\n self.txtouterloop_ntrials = wx.TextCtrl(self, -1, \"outerloop_ntrials\")\r\n \r\n self.lblouterloop_elitecount = wx.StaticText(self, -1, \"Number of elite individuals\")\r\n self.txtouterloop_elitecount = wx.TextCtrl(self, -1, \"outerloop_elitecount\")\r\n \r\n self.lblouterloop_useparallel = wx.StaticText(self, -1, \"Run 
outer-loop GA in parallel?\")\r\n self.chkouterloop_useparallel = wx.CheckBox(self, -1)\r\n \r\n self.lblouterloop_warmstart = wx.StaticText(self, -1, \"Warm-start the outer-loop?\")\r\n self.txtouterloop_warmstart = wx.TextCtrl(self, -1, \"outerloop_warmstart\")\r\n \r\n outerloopgrid.AddMany([self.lblrun_outerloop, self.cmbrun_outerloop,\r\n self.lblouterloop_popsize, self.txtouterloop_popsize,\r\n self.lblouterloop_genmax, self.txtouterloop_genmax,\r\n self.lblouterloop_tournamentsize, self.txtouterloop_tournamentsize,\r\n self.lblouterloop_CR, self.txtouterloop_CR,\r\n self.lblouterloop_mu, self.txtouterloop_mu,\r\n self.lblouterloop_stallmax, self.txtouterloop_stallmax,\r\n self.lblouterloop_tolfit, self.txtouterloop_tolfit,\r\n self.lblouterloop_ntrials, self.txtouterloop_ntrials,\r\n self.lblouterloop_elitecount, self.txtouterloop_elitecount,\r\n self.lblouterloop_warmstart, self.txtouterloop_warmstart])\r\n\r\n \r\n vboxleft = wx.BoxSizer(wx.VERTICAL)\r\n vboxright = wx.BoxSizer(wx.VERTICAL)\r\n lblLeftTitle = wx.StaticText(self, -1, \"Inner-Loop Solver Parameters\")\r\n lblRightTitle = wx.StaticText(self, -1, \"Outer-Loop Solver Parameters\")\r\n vboxleft.Add(lblLeftTitle)\r\n vboxleft.Add(innerloopgrid)\r\n vboxright.Add(lblRightTitle)\r\n vboxright.Add(outerloopgrid)\r\n \r\n font = self.GetFont()\r\n font.SetWeight(wx.FONTWEIGHT_BOLD)\r\n lblLeftTitle.SetFont(font)\r\n lblRightTitle.SetFont(font)\r\n\r\n \r\n hbox = wx.BoxSizer(wx.HORIZONTAL)\r\n \r\n hbox.Add(vboxleft)\r\n hbox.AddSpacer(20)\r\n hbox.Add(vboxright)\r\n \r\n \r\n self.lbltrialX = wx.StaticText(self, -1, \"Trial decision vector or initial guess\")\r\n self.txttrialX = wx.TextCtrl(self, -1, style=wx.TE_MULTILINE, size = (700,300))\r\n self.btntrialX = wx.Button(self, -1, \"...\")\r\n trialbox = wx.BoxSizer(wx.HORIZONTAL)\r\n trialbox.AddMany([self.lbltrialX, self.btntrialX])\r\n\r\n self.mainbox = wx.BoxSizer(wx.VERTICAL)\r\n self.mainbox.AddMany([hbox, trialbox, self.txttrialX])\r\n\r\n 
self.SetSizer(self.mainbox)\r\n self.SetupScrolling()\r\n\r\nclass PhysicsOptionsPanel(wx.lib.scrolledpanel.ScrolledPanel):\r\n def __init__(self, parent):\r\n \r\n wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent)\r\n\r\n ephemerisgrid = wx.GridSizer(4,2,5,5)\r\n perturbgrid = wx.GridSizer(4,2,5,5)\r\n \r\n self.lblephemeris_source = wx.StaticText(self, -1, \"Ephemeris Source\")\r\n ephemeris_source_typestypes = ['Static','SPICE']\r\n self.cmbephemeris_source = wx.ComboBox(self, -1, choices = ephemeris_source_typestypes, style=wx.CB_READONLY)\r\n\r\n self.lblSPICE_leap_seconds_kernel = wx.StaticText(self, -1, \"Leap seconds kernel\")\r\n self.txtSPICE_leap_seconds_kernel = wx.TextCtrl(self, -1, \"SPICE_leap_seconds_kernel\", size=(200,-1))\r\n\r\n self.lblSPICE_reference_frame_kernel = wx.StaticText(self, -1, \"Frame kernel\")\r\n self.txtSPICE_reference_frame_kernel = wx.TextCtrl(self, -1, \"SPICE_reference_frame_kernel\", size=(200,-1))\r\n\r\n self.lbluniverse_folder = wx.StaticText(self, -1, \"Universe folder\")\r\n self.txtuniverse_folder = wx.TextCtrl(self, -1, \"universe_folder\", size=(400,-1))\r\n self.btnGetNewUniverseFolder = wx.Button(self, -1, \"...\")\r\n self.btnSetDefaultUniverse = wx.Button(self, -1, \"Default\")\r\n UniverseButtonSizer = wx.BoxSizer(wx.HORIZONTAL)\r\n UniverseButtonSizer.AddMany([self.txtuniverse_folder, self.btnGetNewUniverseFolder, self.btnSetDefaultUniverse])\r\n\r\n self.lblperturb_SRP = wx.StaticText(self, -1, \"Enable SRP\")\r\n self.chkperturb_SRP = wx.CheckBox(self, -1)\r\n\r\n self.lblperturb_thirdbody = wx.StaticText(self, -1, \"Enable third body\")\r\n self.chkperturb_thirdbody = wx.CheckBox(self, -1)\r\n\r\n self.lblspacecraft_area = wx.StaticText(self, -1, \"Spacecraft area (in m^2)\")\r\n self.txtspacecraft_area = wx.TextCtrl(self, -1, \"spacecraft_area\")\r\n\r\n self.lblcoefficient_of_reflectivity = wx.StaticText(self, -1, \"Coefficient of reflectivity\")\r\n self.txtcoefficient_of_reflectivity = 
wx.TextCtrl(self, -1, \"coefficient_of_reflectivity\")\r\n\r\n ephemerisgrid.AddMany([self.lblephemeris_source, self.cmbephemeris_source,\r\n self.lblSPICE_leap_seconds_kernel, self.txtSPICE_leap_seconds_kernel,\r\n self.lblSPICE_reference_frame_kernel, self.txtSPICE_reference_frame_kernel,\r\n self.lbluniverse_folder, UniverseButtonSizer])\r\n perturbgrid.AddMany([ self.lblperturb_SRP, self.chkperturb_SRP,\r\n self.lblperturb_thirdbody, self.chkperturb_thirdbody,\r\n self.lblspacecraft_area, self.txtspacecraft_area,\r\n self.lblcoefficient_of_reflectivity, self.txtcoefficient_of_reflectivity])\r\n\r\n\r\n\r\n\r\n lblLeftTitle = wx.StaticText(self, -1, \"Ephemeris settings\")\r\n vboxleft = wx.BoxSizer(wx.VERTICAL)\r\n vboxleft.AddMany([lblLeftTitle, ephemerisgrid])\r\n\r\n lblRightTitle = wx.StaticText(self, -1, \"Perturbation settings\")\r\n vboxright = wx.BoxSizer(wx.VERTICAL)\r\n vboxright.AddMany([lblRightTitle, perturbgrid])\r\n\r\n font = self.GetFont()\r\n font.SetWeight(wx.FONTWEIGHT_BOLD)\r\n lblLeftTitle.SetFont(font)\r\n lblRightTitle.SetFont(font)\r\n\r\n self.mainbox = wx.BoxSizer(wx.HORIZONTAL)\r\n \r\n self.mainbox.Add(vboxleft)\r\n self.mainbox.AddSpacer(20)\r\n self.mainbox.Add(vboxright)\r\n\r\n\r\n spiralgrid = wx.GridSizer(2,2,5,5)\r\n self.lblspiral_model_type = wx.StaticText(self, -1, \"Spiral model type\")\r\n spiral_model_choices = ['Battin','Edelbaum']\r\n self.cmbspiral_model_type = wx.ComboBox(self, -1, choices = spiral_model_choices, style = wx.CB_READONLY)\r\n spiralgrid.AddMany([self.lblspiral_model_type, self.cmbspiral_model_type])\r\n lblBottomTitle = wx.StaticText(self, -1, \"Spiral settings\")\r\n lblBottomTitle.SetFont(font)\r\n vboxspiral = wx.BoxSizer(wx.VERTICAL)\r\n vboxspiral.AddMany([lblBottomTitle, spiralgrid])\r\n\r\n lambertgrid = wx.GridSizer(2,2,5,5)\r\n self.lbllambert_type = wx.StaticText(self, -1, \"Lambert solver type\")\r\n lambert_choices = ['Arora-Russell','Izzo (not included in open-source)']\r\n 
self.cmblambert_type = wx.ComboBox(self, -1, choices = lambert_choices, style = wx.CB_READONLY)\r\n lambertgrid.AddMany([self.lbllambert_type, self.cmblambert_type])\r\n lblLambertTitle = wx.StaticText(self, -1, \"Lambert settings\")\r\n lblLambertTitle.SetFont(font)\r\n vboxlambert = wx.BoxSizer(wx.VERTICAL)\r\n vboxlambert.AddMany([lblLambertTitle, lambertgrid])\r\n\r\n self.mainvbox = wx.BoxSizer(wx.VERTICAL)\r\n self.mainvbox.Add(self.mainbox)\r\n self.mainvbox.AddSpacer(20)\r\n self.mainvbox.AddMany([vboxspiral, vboxlambert])\r\n\r\n self.SetSizer(self.mainvbox)\r\n self.SetupScrolling()\r\n\r\nclass OutputOptionsPanel(wx.lib.scrolledpanel.ScrolledPanel):\r\n def __init__(self, parent):\r\n \r\n wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent)\r\n\r\n self.mainbox = wx.FlexGridSizer(20,2,5,5)\r\n\r\n self.lblcreate_GMAT_script = wx.StaticText(self, -1, \"Create GMAT scripts\")\r\n self.chkcreate_GMAT_script = wx.CheckBox(self, -1)\r\n\r\n self.lbloutput_units = wx.StaticText(self, -1, \"Output units\")\r\n outputchoices = ['km and km/s','LU and LU/day']\r\n self.cmboutput_units = wx.ComboBox(self, -1, choices=outputchoices, style=wx.CB_READONLY)\r\n \r\n self.lbloutput_dormant_journeys = wx.StaticText(self, -1, \"Output journey entries for wait times at intermediate and final target?\")\r\n self.chkoutput_dormant_journeys = wx.CheckBox(self, -1)\r\n\r\n self.lblpost_mission_wait_time = wx.StaticText(self, -1, \"Stay time at the final target\")\r\n self.txtpost_mission_wait_time = wx.TextCtrl(self, -1, \"post_mission_wait_time\")\r\n\r\n self.lblgenerate_initial_guess_file = wx.StaticText(self, -1, \"Generate initial guess file? 
(experimental!)\")\r\n self.chkgenerate_initial_guess_file = wx.CheckBox(self, -1)\r\n\r\n self.lblmission_type_for_initial_guess_file = wx.StaticText(self, -1, \"Mission type for initial guess file\")\r\n initial_guess_file_choices = [\"MGA\",\"MGADSM\",\"MGALT\",\"FBLT\",\"MGANDSM\",\"PSBI\"]\r\n self.cmbmission_type_for_initial_guess_file = wx.ComboBox(self, -1, choices=initial_guess_file_choices, style=wx.CB_READONLY)\r\n\r\n self.lbloverride_working_directory = wx.StaticText(self, -1, \"Override working directory?\")\r\n self.chkoverride_working_directory = wx.CheckBox(self, -1)\r\n\r\n self.lblforced_working_directory = wx.StaticText(self, -1, \"Working directory\")\r\n self.txtforced_working_directory = wx.TextCtrl(self, -1, \"forced_working_directory\", size=(600,-1))\r\n self.btnforced_working_directory = wx.Button(self, -1, \"...\")\r\n working_directory_sizer = wx.BoxSizer(wx.HORIZONTAL)\r\n working_directory_sizer.AddMany([self.txtforced_working_directory, self.btnforced_working_directory])\r\n\r\n self.lblgenerate_forward_integrated_ephemeris = wx.StaticText(self, -1, \"Generate forward-integrated STK-compatible ephemeris\")\r\n self.chkgenerate_forward_integrated_ephemeris = wx.CheckBox(self, -1)\r\n\r\n self.lblbackground_mode = wx.StaticText(self, -1, \"Enable background mode?\")\r\n self.chkbackground_mode = wx.CheckBox(self, -1)\r\n\r\n self.mainbox.AddMany([ self.lbloutput_dormant_journeys, self.chkoutput_dormant_journeys,\r\n self.lblpost_mission_wait_time, self.txtpost_mission_wait_time,\r\n self.lblcreate_GMAT_script, self.chkcreate_GMAT_script,\r\n self.lbloutput_units, self.cmboutput_units,\r\n self.lblgenerate_initial_guess_file, self.chkgenerate_initial_guess_file,\r\n self.lblmission_type_for_initial_guess_file, self.cmbmission_type_for_initial_guess_file,\r\n self.lbloverride_working_directory, self.chkoverride_working_directory,\r\n self.lblforced_working_directory, working_directory_sizer,\r\n 
self.lblgenerate_forward_integrated_ephemeris, self.chkgenerate_forward_integrated_ephemeris,\r\n self.lblbackground_mode, self.chkbackground_mode])\r\n\r\n self.SetSizer(self.mainbox)\r\n self.SetupScrolling()\r\n\r\n \r\nclass OptionsBook(wx.Notebook):\r\n #class for Options notebook\r\n def __init__(self, parent):\r\n wx.Notebook.__init__(self, parent=parent, id=wx.ID_ANY, style=\r\n wx.BK_DEFAULT\r\n #wx.BK_TOP \r\n #wx.BK_BOTTOM\r\n #wx.BK_LEFT\r\n #wx.BK_RIGHT\r\n )\r\n \r\n font = self.GetFont()\r\n font.SetPointSize(10)\r\n self.SetFont(font)\r\n\r\n\r\n #create tabs\r\n self.tabGlobal = GlobalOptionsPanel(self)\r\n self.AddPage(self.tabGlobal, \"Global Mission Options\")\r\n self.tabSpacecraft = SpacecraftOptionsPanel(self)\r\n self.AddPage(self.tabSpacecraft, \"Spacecraft Options\")\r\n self.tabJourney = JourneyOptionsPanel(self)\r\n self.AddPage(self.tabJourney, \"Journey Options\")\r\n self.tabSolver = SolverOptionsPanel(self)\r\n self.AddPage(self.tabSolver, \"Solver Options\")\r\n self.tabPhysics = PhysicsOptionsPanel(self)\r\n self.AddPage(self.tabPhysics, \"Physics Options\")\r\n self.tabOutput = OutputOptionsPanel(self)\r\n self.AddPage(self.tabOutput, \"Output Options\")","sub_path":"branch/EMTG_student/PyEMTG/OptionsNotebook.py","file_name":"OptionsNotebook.py","file_ext":"py","file_size_in_byte":74018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"431063408","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\nimport argparse, sys, os\nfrom glob import glob\n\n\nclass TruncateCSV(object):\n def __init__(self, args):\n self.args = args\n self.config()\n\n def config(self):\n \"\"\"configure options\"\"\"\n if self.args.cd != None:\n self.chdir()\n\n if self.args.files != None:\n self.csvs = self.args.files\n else:\n self.csvs = glob(self.args.glob)\n\n if self.args.ext[0] != \".\":\n self.args.ext = (\".\" + self.args.ext)\n\n def chdir(self):\n \"\"\"allows user to fix bad directory input\"\"\"\n try:\n os.chdir(self.args.cd)\n\n except: # bad path\n err = \"Could not change to directory {}\".format(self.args.cd)\n self.check_input(err, \"directory\", \"os.path.isdir('{}')\",\n \"os.chdir('{}')\")\n\n def check_input(self, err_msg, checking, exit_condition, hook=None):\n \"\"\"asks user for input and returns answer if it meets a condition\"\"\"\n print(err_msg)\n while True:\n print()\n prompt = \"Reenter {}; ('e' to exit): \".format(checking)\n response = input(prompt)\n if response.lower() == \"e\":\n print(\"Exiting...\")\n sys.exit()\n else:\n try:\n command = exit_condition.format(response)\n path_status = eval(command)\n\n if path_status:\n if hook != None:\n hook = hook.format(response)\n exec(hook)\n break\n except:\n print(\"Try again.\")\n return response\n\n\n def handle_file(self, csv):\n \"\"\"truncates file creates backup based on arguments\"\"\"\n lines = self.csvread(csv)\n if not self.args.overwrite:\n backup = csv + self.args.ext\n self.csvwrite(backup, lines)\n\n else:\n self.csvwrite(csv, lines)\n\n def csvread(self, csv):\n \"\"\"opens a csv file and returns lines greater than a certain index\"\"\"\n with open(csv, 'r', encoding='utf-8', errors='replace') as to_trunc:\n lines = to_trunc.readlines()\n return lines[self.args.num:]\n\n def csvwrite(self, csv, lines):\n \"\"\"writes to file with list of lines provided\"\"\"\n with open(csv, 'w', encoding='utf-8') as to_write:\n 
to_write.writelines(lines)\n\n def fix_file(self, fnf):\n \"\"\"allows user to correct a bad file input\"\"\"\n err = \"File {} not found in {}\".format(fnf, os.getcwd())\n new_file = self.check_input(err, \"filename\", \"os.path.isfile('{}')\")\n return new_file\n\n\ndef parser():\n if len(sys.argv[1:]) == 0:\n sys.argv.append(\"-h\")\n\n parser = argparse.ArgumentParser(\n description=\"Remove unneeded headers from data files\",\n usage=\"num [-hvd DIR] [-o | -e EXT] [(-f FILE)... | -g GLOB]\",\n epilog=\"\"\"NOTE: glob patterns should be put in quotes; filenames\n listed with the -f flag should not be relative paths if the -d flag\n is also enabled.\"\"\")\n\n# if you want script to exit if number not provided (no default value)\n parser.add_argument(\"num\",\n type=int,\n help='the number of lines to be truncated')\n\n# if you want a default value for num as pos. arg\n # parser.add_argument(\"num\",\n # nargs='?',\n # default=1,\n # type=int,\n # help='the number of lines to be truncated')\n\n parser.add_argument(\"-d\",\"--dir\",\n metavar=\"DIR\",\n dest=\"cd\",\n help=\"the target directory to truncate all CSVs in\")\n\n files_grp = parser.add_mutually_exclusive_group()\n files_grp.add_argument(\"-f\",\"--file\",\n metavar=\"FILE\",\n dest=\"files\",\n action=\"append\",\n help=\"files to truncate\")\n files_grp.add_argument(\"-g\",\"--glob\",\n default=\"*.csv\",\n help=\"glob pattern to match files [default: %(default)s]\")\n\n backup_grp = parser.add_mutually_exclusive_group()\n backup_grp.add_argument(\"-o\",\"--overwrite\",\n action=\"store_true\",\n help=\"overwrite files without making backups\")\n backup_grp.add_argument(\"-e\",\"--ext\",\n default=\".trc\",\n help=\"backup extension [default: %(default)s]\")\n\n parser.add_argument(\"-v\",\"--version\",\n action=\"version\",\n version=\"%(prog)s 2.0.1 (test)\")\n\n args = parser.parse_args()\n return args\n\n\n\ndef main():\n args = parser()\n to_trunc = TruncateCSV(args)\n\n if len(to_trunc.csvs) > 
0:\n for csv in to_trunc.csvs:\n\n try:\n to_trunc.handle_file(csv)\n\n except FileNotFoundError:\n new_file = to_trunc.fix_file(csv)\n to_trunc.handle_file(new_file)\n\n else:\n no_files = \"No files were found matching pattern {} in {}\"\n print(no_files.format(to_trunc.args.glob, os.getcwd()))\n\n\nif __name__ == \"__main__\":\n main()\n\n# TODO:\n","sub_path":"python/trunchead_csv.py","file_name":"trunchead_csv.py","file_ext":"py","file_size_in_byte":5109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"557242707","text":"\"\"\"\nReads every line from the CSV and import it into the DB\nThe CSV file has been cleaned as to keep this script as simple as possible.\n\"\"\"\n\nimport csv\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.engine.url import URL\nimport settings\n\nengine = create_engine(URL(**settings.DATABASE))\ndb = scoped_session(sessionmaker(bind=engine))\n\n\ndef main():\n file = open(\"books.csv\")\n reader = csv.reader(file)\n for isbn, title, author, year in reader:\n db.execute(\"INSERT INTO books\"\n \"(isbn, title, author, year)\"\n \"VALUES (:isbn, :title, :author, :year)\",\n {\"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year})\n\n print(f\"added {title} by {author} to the database\")\n db.commit()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"import_books.py","file_name":"import_books.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"142294588","text":"from __future__ import unicode_literals\nimport re\nfrom past.builtins import cmp\nimport functools\nimport frappe, erpnext\nfrom erpnext.accounts.report.utils import get_currency, convert_to_presentation_currency\nfrom erpnext.accounts.utils import get_fiscal_year\nfrom frappe import _\nfrom six import itervalues\nfrom frappe.utils import (flt, getdate, get_first_day, add_months, add_days, formatdate, cstr, cint)\nfrom erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions, get_dimension_with_children\n\n\ndef get_budget_data(\n\t\tcompany, root_type, period_list, filters=None,\n\t\taccumulated_values=1, only_current_fiscal_year=True, ignore_closing_entries=False,\n\t\tignore_accumulated_values_for_fy=False , total = True):\n\n\taccounts = get_budget_accounts(company, root_type)\n\tif not accounts:\n\t\treturn None\n\n\taccounts, accounts_by_name, parent_children_map = filter_budget_accounts(accounts)\n\n\tcompany_currency = get_appropriate_currency(company, filters)\n\n\tbudget_distribution_by_account = {}\n\tfor root in frappe.db.sql(\"\"\"select lft, rgt from tabAccount\n\t\t\twhere root_type=%s and ifnull(parent_account, '') = ''\"\"\", root_type, as_dict=1):\n\n\t\tset_budget_distribution_by_account(\n\t\t\tcompany,\n\t\t\tperiod_list[0][\"year_start_date\"] if only_current_fiscal_year else None,\n\t\t\tperiod_list[-1][\"to_date\"],\n\t\t\troot.lft, root.rgt, filters,\n\t\t\tbudget_distribution_by_account, ignore_closing_entries=ignore_closing_entries\n\t\t)\n\tcalculate_values(\n\t\taccounts_by_name, budget_distribution_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy)\n\taccumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values)\n\tout = prepare_data(accounts, period_list, company_currency)\n\tout = filter_out_zero_value_rows(out, parent_children_map)\n\n\tif out and total:\n\t\tadd_total_row(out, root_type, period_list, 
company_currency)\n\n\treturn out\n\ndef get_appropriate_currency(company, filters=None):\n\tif filters and filters.get(\"presentation_currency\"):\n\t\treturn filters[\"presentation_currency\"]\n\telse:\n\t\treturn frappe.get_cached_value('Company', company, \"default_currency\")\n\ndef get_budget_accounts(company, root_type):\n\treturn frappe.db.sql(\"\"\"\n\t\tselect name, account_number, parent_account, lft, rgt, root_type, report_type, account_name, include_in_gross, account_type, is_group, lft, rgt\n\t\tfrom `tabAccount`\n\t\twhere company=%s and root_type=%s order by lft\"\"\", (company, root_type), as_dict=True)\n\ndef set_budget_distribution_by_account(\n\t\tcompany, from_date, to_date, root_lft, root_rgt, filters, budget_distribution_by_account, ignore_closing_entries=False):\n\t\"\"\"Returns a dict like { \"account\": [gl entries], ... }\"\"\"\n\tadditional_conditions = get_additional_conditions(from_date, ignore_closing_entries, filters)\n\n\taccounts = frappe.db.sql_list(\"\"\"select name from `tabAccount`\n\t\twhere lft >= %s and rgt <= %s and company = %s\"\"\", (root_lft, root_rgt, company))\n\n\tif accounts:\n\t\tadditional_conditions += \" and ba.account in ({})\"\\\n\t\t\t.format(\", \".join([frappe.db.escape(d) for d in accounts]))\n\n\t\tgl_filters = {\n\t\t\t\"company\": company,\n\t\t\t\"from_date\": from_date,\n\t\t\t\"to_date\": to_date,\n\t\t\t\"finance_book\": cstr(filters.get(\"finance_book\"))\n\t\t}\n\n\t\tfor key, value in filters.items():\n\t\t\tif value:\n\t\t\t\tgl_filters.update({\n\t\t\t\t\tkey: value\n\t\t\t\t})\n\n\t\tbudget_entries = frappe.db.sql(\"\"\"\n\t\t\tselect \n\t\t\t\tfa.year_start_date, \n\t\t\t\tb.budget_against, \n\t\t\t\tb.cost_center, \n\t\t\t\tb.project,\n\t\t\t\tb.payroll_entry, \n\t\t\t\tb.fiscal_year, \n\t\t\t\tba.account, \n\t\t\t\tba.budget_amount, \n\t\t\t\tb.monthly_distribution\n\t\t\tfrom \n\t\t\t\t`tabBudget` b, \n\t\t\t\t`tabBudget Account` ba, \n\t\t\t\t`tabFiscal Year` fa 
\n\t\t\twhere\n\t\t\t\tb.name = ba.parent \n\t\t\t\tand b.docstatus = 1\t\t\t\t\n\t\t\t\tand b.fiscal_year = fa.name \n\t\t\t\tand b.company=%(company)s {additional_conditions} \n\t\t\t\tand fa.year_start_date >= %(from_date)s \n\t\t\t\tand fa.year_end_date <= %(to_date)s \n\t\t\torder by ba.account\"\"\".format(additional_conditions=additional_conditions), gl_filters, as_dict=True)\n\n\n\t\tfor budget in budget_entries:\n\t\t\tif budget['monthly_distribution']:\n\t\t\t\t#print(\"dddd\")\n\t\t\t\tget_distribution_budget_by_percentage(budget['account'], budget['fiscal_year'], budget['monthly_distribution'], budget['budget_amount'],budget_distribution_by_account)\n\t\t\t#else:\n\t\t\t\t#print(\"asdf\")\n\t\t\t\t#get_distribution_budget(budget['account'], budget['fiscal_year'], budget['budget_amount'],budget_distribution_by_account)\n\n\t\treturn budget_distribution_by_account\n\ndef get_distribution_budget_by_percentage(account, fiscal_year, monthly_distribution, budget_amount, budget_distribution_by_account):\n\tmdps = frappe.db.sql(\"\"\"\n\t\tSELECT\n\t\t\tIF ( YEAR(fy.year_start_date ) = YEAR(fy.year_end_date),\n\t\t\t\t\tDATE(CONCAT_WS('-', md.fiscal_year, month(str_to_date(LEFT(mdp.month,3),'%%b')), 1))\n \t\t, IF( MONTH(fy.year_start_date ) > month(str_to_date(LEFT(mdp.month,3),'%%b')),\n \t\t\tDATE(CONCAT_WS('-', YEAR(fy.year_end_date ), month(str_to_date(LEFT(mdp.month,3),'%%b')), 1)),\n \t\t\tDATE(CONCAT_WS('-', YEAR(fy.year_start_date), month(str_to_date(LEFT(mdp.month,3),'%%b')), 1))\n \t\t)\n \t) AS posting_date,\n\t\t\tmd.fiscal_year, \t\t\n\t\t\tmdp.percentage_allocation,\n\t\t\t'' AS account,\n\t\t\t'' AS total_budget_amount\n\t\tFROM \n\t\t\t`tabMonthly Distribution` md,\n\t\t\t`tabMonthly Distribution Percentage` mdp,\n \t\t`tabFiscal Year` fy\n\t\tWHERE\n\t\t\tmd.name = mdp.parent\n \t\tand md.fiscal_year = fy.name\n \t\tand md.fiscal_year = %(fiscal_year)s\n \t\tand md.name = %(monthly_distribution)s \"\"\",{ \"fiscal_year\": fiscal_year, 
\"monthly_distribution\": monthly_distribution},as_dict=True)\n\t\n\tfor v in mdps:\n\t\tv['account'] = account\n\t\tv['total_budget_amount'] = (budget_amount*v['percentage_allocation'])/100\n\t\tbudget_distribution_by_account.setdefault(account, []).append(v)\n\t#print(budget_distribution_by_account)\n\treturn budget_distribution_by_account\n\t\ndef get_distribution_budget(account, fiscal_year, budget_amount, budget_distribution_by_account):\n\tfiscal_year_start_date = frappe.db.get_value(\"Fiscal Year\", fiscal_year, \"year_start_date\")\n\tv={\n\t\t\"posting_date\": '',\n\t\t\"account\": account,\n\t\t\"fiscal_year\": fiscal_year,\n\t\t\"total_budget_amount\":''\n\t}\n\n\tfor x in range(12):\t\t\n\t\tv[\"posting_date\"] = add_months(fiscal_year_start_date, x)\n\t\tv[\"account\"] = account\n\t\tv[\"total_budget_amount\"] = (8.333 * budget_amount) / 100 \n\t\tbudget_distribution_by_account.setdefault(account, []).append(v)\n\t#print(budget_distribution_by_account)\n\treturn budget_distribution_by_account\n\t\ndef calculate_values(\n\t\taccounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy):\n\tfor entries in itervalues(gl_entries_by_account):\n\t\tfor entry in entries:\n\t\t\td = accounts_by_name.get(entry.account)\n\t\t\tif not d:\n\t\t\t\tfrappe.msgprint(\n\t\t\t\t\t_(\"Could not retrieve information for {0}.\".format(entry.account)), title=\"Error\",\n\t\t\t\t\traise_exception=1\n\t\t\t\t)\n\t\t\tfor period in period_list:\n\t\t\t\t# check if posting date is within the period\n\n\t\t\t\tif entry.posting_date <= period.to_date:\n\t\t\t\t\tif (accumulated_values or entry.posting_date >= period.from_date) and \\\n\t\t\t\t\t\t(not ignore_accumulated_values_for_fy or\n\t\t\t\t\t\t\tentry.fiscal_year == period.to_date_fiscal_year):\n\t\t\t\t\t\td[period.key] = d.get(period.key, 0.0) + flt(entry.total_budget_amount)\n\n\t\t\t#if entry.posting_date < period_list[0].year_start_date:\n\t\t\t#\td[\"opening_balance\"] = 
d.get(\"opening_balance\", 0.0) + flt(entry.debit) - flt(entry.credit)\n\ndef filter_budget_accounts(accounts, depth=20):\n\tparent_children_map = {}\n\taccounts_by_name = {}\n\tfor d in accounts:\n\t\taccounts_by_name[d.name] = d\n\t\tparent_children_map.setdefault(d.parent_account or None, []).append(d)\n\n\tfiltered_accounts = []\n\n\tdef add_to_list(parent, level):\n\t\tif level < depth:\n\t\t\tchildren = parent_children_map.get(parent) or []\n\t\t\tsort_accounts(children, is_root=True if parent==None else False)\n\n\t\t\tfor child in children:\n\t\t\t\tchild.indent = level\n\t\t\t\tfiltered_accounts.append(child)\n\t\t\t\tadd_to_list(child.name, level + 1)\n\n\tadd_to_list(None, 0)\n\n\treturn filtered_accounts, accounts_by_name, parent_children_map\n\ndef sort_accounts(accounts, is_root=False, key=\"name\"):\n\t\"\"\"Sort root types as Asset, Liability, Equity, Income, Expense\"\"\"\n\n\tdef compare_accounts(a, b):\n\t\tif re.split('\\W+', a[key])[0].isdigit():\n\t\t\t# if chart of accounts is numbered, then sort by number\n\t\t\treturn cmp(a[key], b[key])\n\t\telif is_root:\n\t\t\tif a.report_type != b.report_type and a.report_type == \"Balance Sheet\":\n\t\t\t\treturn -1\n\t\t\tif a.root_type != b.root_type and a.root_type == \"Asset\":\n\t\t\t\treturn -1\n\t\t\tif a.root_type == \"Liability\" and b.root_type == \"Equity\":\n\t\t\t\treturn -1\n\t\t\tif a.root_type == \"Income\" and b.root_type == \"Expense\":\n\t\t\t\treturn -1\n\t\telse:\n\t\t\t# sort by key (number) or name\n\t\t\treturn cmp(a[key], b[key])\n\t\treturn 1\n\n\taccounts.sort(key = functools.cmp_to_key(compare_accounts))\n\ndef get_additional_conditions(from_date, ignore_closing_entries, filters):\n\tadditional_conditions = []\n\n\taccounting_dimensions = get_accounting_dimensions(as_list=False)\n\n\tif filters:\n\t\tif filters.get(\"project\"):\n\t\t\tif not isinstance(filters.get(\"project\"), list):\n\t\t\t\tfilters.project = 
frappe.parse_json(filters.get(\"project\"))\n\n\t\t\tadditional_conditions.append(\"b.project in %(project)s\")\n\n\t\tif filters.get(\"cost_center\"):\n\t\t\tfilters.cost_center = get_cost_centers_with_children(filters.cost_center)\n\t\t\tadditional_conditions.append(\"b.cost_center in %(cost_center)s\")\n\n\tif accounting_dimensions:\n\t\tfor dimension in accounting_dimensions:\n\t\t\tif filters.get(dimension.fieldname):\n\t\t\t\tif frappe.get_cached_value('DocType', dimension.document_type, 'is_tree'):\n\t\t\t\t\tfilters[dimension.fieldname] = get_dimension_with_children(dimension.document_type,\n\t\t\t\t\t\tfilters.get(dimension.fieldname))\n\t\t\t\t\tadditional_conditions.append(\"{0} in %({0})s\".format(dimension.fieldname))\n\t\t\t\telse:\n\t\t\t\t\tadditional_conditions.append(\"{0} in (%({0})s)\".format(dimension.fieldname))\n\n\treturn \" and {}\".format(\" and \".join(additional_conditions)) if additional_conditions else \"\"\n\ndef accumulate_values_into_parents(accounts, accounts_by_name, period_list, accumulated_values):\n\t\"\"\"accumulate children's values in parent accounts\"\"\"\n\tfor d in reversed(accounts):\n\t\tif d.parent_account:\n\t\t\tfor period in period_list:\n\t\t\t\taccounts_by_name[d.parent_account][period.key] = \\\n\t\t\t\t\taccounts_by_name[d.parent_account].get(period.key, 0.0) + d.get(period.key, 0.0)\n\ndef prepare_data(accounts, period_list, company_currency):\n\tdata = []\n\tyear_start_date = period_list[0][\"year_start_date\"].strftime(\"%Y-%m-%d\")\n\tyear_end_date = period_list[-1][\"year_end_date\"].strftime(\"%Y-%m-%d\")\n\n\tfor d in accounts:\n\t\t# add to output\n\t\thas_value = False\n\t\ttotal = 0\n\t\trow = frappe._dict({\n\t\t\t\"account\": _(d.name),\n\t\t\t\"parent_account\": _(d.parent_account) if d.parent_account else '',\n\t\t\t\"indent\": flt(d.indent),\n\t\t\t\"year_start_date\": year_start_date,\n\t\t\t\"year_end_date\": year_end_date,\n\t\t\t\"currency\": company_currency,\n\t\t\t\"include_in_gross\": 
d.include_in_gross,\n\t\t\t\"account_type\": d.account_type,\n\t\t\t\"is_group\": d.is_group,\t\t\t\n\t\t\t\"account_name\": ('%s - %s' %(_(d.account_number), _(d.account_name))\n\t\t\t\tif d.account_number else _(d.account_name))\n\t\t})\n\t\tfor period in period_list:\n\t\t\trow[period.key] = flt(d.get(period.key, 0.0), 3)\n\n\t\t\tif abs(row[period.key]) >= 0.005:\n\t\t\t\t# ignore zero values\n\t\t\t\thas_value = True\n\t\t\t\ttotal += flt(row[period.key])\n\n\t\trow[\"has_value\"] = has_value\n\t\trow[\"total\"] = total\n\t\tdata.append(row)\n\n\treturn data\n\ndef filter_out_zero_value_rows(data, parent_children_map, show_zero_values=False):\n\tdata_with_value = []\n\tfor d in data:\n\t\tif show_zero_values or d.get(\"has_value\"):\n\t\t\tdata_with_value.append(d)\n\t\telse:\n\t\t\t# show group with zero balance, if there are balances against child\n\t\t\tchildren = [child.name for child in parent_children_map.get(d.get(\"account\")) or []]\n\t\t\tif children:\n\t\t\t\tfor row in data:\n\t\t\t\t\tif row.get(\"account\") in children and row.get(\"has_value\"):\n\t\t\t\t\t\tdata_with_value.append(d)\n\t\t\t\t\t\tbreak\n\n\treturn data_with_value\n\ndef add_total_row(out, root_type, period_list, company_currency):\n\ttotal_row = {\n\t\t\"account_name\": _(\"Total {0})\").format(_(root_type)),\n\t\t\"account\": _(\"Total {0} )\").format(_(root_type)),\n\t\t\"currency\": company_currency\n\t}\n\n\tfor row in out:\n\t\tif not row.get(\"parent_account\"):\n\t\t\tfor period in period_list:\n\t\t\t\ttotal_row.setdefault(period.key, 0.0)\n\t\t\t\ttotal_row[period.key] += row.get(period.key, 0.0)\n\t\t\t\trow[period.key] = row.get(period.key, 0.0)\n\n\t\t\ttotal_row.setdefault(\"total\", 0.0)\n\t\t\ttotal_row[\"total\"] += flt(row[\"total\"])\n\t\t\trow[\"total\"] = \"\"\n\n\tif \"total\" in total_row:\n\t\tout.append(total_row)\n\n\t\t# blank row after Total\n\t\tout.append({})\n\ndef get_cost_centers_with_children(cost_centers):\n\tif not isinstance(cost_centers, 
list):\n\t\tcost_centers = [d.strip() for d in cost_centers.strip().split(',') if d]\n\n\tall_cost_centers = []\n\tfor d in cost_centers:\n\t\tif frappe.db.exists(\"Cost Center\", d):\n\t\t\tlft, rgt = frappe.db.get_value(\"Cost Center\", d, [\"lft\", \"rgt\"])\n\t\t\tchildren = frappe.get_all(\"Cost Center\", filters={\"lft\": [\">=\", lft], \"rgt\": [\"<=\", rgt]})\n\t\t\tall_cost_centers += [c.name for c in children]\n\t\telse:\n\t\t\tfrappe.throw(_(\"Cost Center: {0} does not exist\".format(d)))\n\n\treturn list(set(all_cost_centers))\n","sub_path":"tailpos_sync/tailpos_sync/report/budget_statement_helper.py","file_name":"budget_statement_helper.py","file_ext":"py","file_size_in_byte":13150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"475823310","text":"from math import cos, sin, radians\n\ndef crusoe(n, d, ang, dist_mult, ang_mult):\n x, y, a = 0, 0, radians(ang)\n for i in range(n):\n x += d * cos(a)\n y += d * sin(a)\n d *= dist_mult\n a *= ang_mult\n return x, y","sub_path":"crusoe.py","file_name":"crusoe.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"508187171","text":"\"\"\"\n@author:Liushihao\n@time:2020/5/16:15:23\n@email:Liushihao_1224@163.com\n@describe: 编写一个程序,使用正则表达式校验输入的车牌号是否正确。\n\"\"\"\nimport re\ncar_at = re.compile('^(([\\u4e00-\\u9fa5]{1}[A-Z]{1})[-]?|([wW][Jj][\\u4e00-\\u9fa5]{1}[-]?)|([a-zA-Z]{2}))[A-Za-z0-9]{5}$')\nwhile True:\n car_num = input(\"请输入车牌号: \")\n result = re.search(car_at, car_num)\n if result:\n print(\"车牌号正确\")\n else:\n print(\"车牌号不正确\")","sub_path":"chapter8/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"203127002","text":"# From https://github.com/google-research/planet/blob/0c6f7d3c56fe691da5b0a2fc62db3cb7075cfcf4/planet/control/wrappers.py#L427\n\nimport datetime\nimport io\nimport os\nimport uuid\n\nimport numpy as np\nimport tensorflow as tf\n\nclass CollectGymDataset(object):\n \"\"\"Collect transition tuples and store episodes as Numpy files.\"\"\"\n\n def __init__(self, env, outdir):\n self._env = env\n self._outdir = outdir and os.path.expanduser(outdir)\n self._episode = None\n self._transition = None\n\n def __getattr__(self, name):\n return getattr(self._env, name)\n\n def step(self, action, *args, **kwargs):\n if kwargs.get('blocking', True):\n transition = self._env.step(action, *args, **kwargs)\n return self._process_step(action, *transition)\n else:\n future = self._env.step(action, *args, **kwargs)\n return lambda: self._process_step(action, *future())\n\n def reset(self, *args, **kwargs):\n if kwargs.get('blocking', True):\n observ = self._env.reset(*args, **kwargs)\n return self._process_reset(observ)\n else:\n future = self._env.reset(*args, **kwargs)\n return lambda: self._process_reset(future())\n\n def _process_step(self, action, observ, reward, done, info):\n self._transition.update({'action': action, 'reward': reward})\n self._transition.update(info)\n self._episode.append(self._transition)\n self._transition = {}\n if not done:\n self._transition.update(self._process_observ(observ))\n else:\n episode = self._get_episode()\n info['episode'] = episode\n if self._outdir:\n filename = self._get_filename()\n self._write(episode, filename)\n return observ, reward, done, info\n\n def _process_reset(self, observ):\n self._episode = []\n self._transition = {}\n self._transition.update(self._process_observ(observ))\n return observ\n\n def _process_observ(self, observ):\n if not isinstance(observ, dict):\n observ = {'observ': observ}\n return observ\n\n def _get_filename(self):\n timestamp = 
datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n identifier = str(uuid.uuid4()).replace('-', '')\n filename = '{}-{}.npz'.format(timestamp, identifier)\n filename = os.path.join(self._outdir, filename)\n return filename\n\n def _get_episode(self):\n episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}\n episode = {k: np.array(v) for k, v in episode.items()}\n for key, sequence in episode.items():\n if sequence.dtype == 'object':\n message = \"Sequence '{}' is not numeric:\\n{}\"\n raise RuntimeError(message.format(key, sequence))\n return episode\n\n def _write(self, episode, filename):\n if not tf.gfile.Exists(self._outdir):\n tf.gfile.MakeDirs(self._outdir)\n with io.BytesIO() as file_:\n np.savez_compressed(file_, **episode)\n file_.seek(0)\n with tf.gfile.Open(filename, 'w') as ff:\n ff.write(file_.read())\n name = os.path.splitext(os.path.basename(filename))[0]\n print('Recorded episode {}.'.format(name))\n","sub_path":"collect_gym_dataset.py","file_name":"collect_gym_dataset.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"235260300","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 3 15:31:10 2018\r\n\r\n\"\"\"\r\nimport threading as tr,time\r\n\r\ndef slp():\r\n time.sleep(5)\r\n print('wake up')\r\n \r\ntobj=tr.Thread(target=slp)\r\ntobj.start()\r\ntobj.join() #to join main thread to child\r\n\r\nprint('done')","sub_path":"python/threading.py","file_name":"threading.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"149793480","text":"from otree.api import Currency as c, currency_range\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\nfrom . import models\nimport random\n\nclass PickCard(Page):\n form_model = models.Player\n form_fields = ['believe1', 'scale1', 'believe2', 'scale2']\n\n def vars_for_template(self):\n return {\n 'msg': self.player.content\n }\n\n\nclass PickSelect(Page):\n pass\n\n\nclass Intro(Page):\n def vars_for_template(self):\n self.player.set_k()\n msgs = self.player.get_msg()\n self.player.content = msgs[self.player.card_content()]\n\n\nclass FinalResult(Page):\n def vars_for_template(self):\n # random.shuffle(self.player.participant.vars['payoff'])\n chosen_round = self.player.participant.vars['chosen_round']\n urn = self.player.participant.vars['payoff'][chosen_round]\n comprehension = round(float(self.player.participant.vars['comprehension']), 1)\n urn = round(float(urn) * self.session.config['real_world_currency_per_point'],1)\n IQ = round(float(self.player.participant.vars['IQ']) * self.session.config['real_world_currency_per_point'], 1)\n self.player.final_payoff = round(urn + IQ + Constants.show_up + comprehension, 1)\n return {\n 'Urn': urn,\n 'IQ': IQ,\n 'show_up': Constants.show_up,\n 'comprehension': comprehension,\n 'total': urn + IQ + Constants.show_up + comprehension\n }\n\npage_sequence = [\n Intro,\n PickSelect,\n PickCard,\n FinalResult\n]\n","sub_path":"pickCard/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"13022363","text":"\nfrom django.urls import path,include, re_path\nfrom . import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\napp_name = 'minor'\n\nurlpatterns = [\n\n path('',views.HomeView.as_view(),name='new-home'),\n path('project//',views.ProjectDetailView.as_view(), name= 'project'),\n path(\"group-registration/\", views.group_registration,name='group-registration'),\n path(\"faculty-registration/\", views.faculty_registration,name='faculty-registration'),\n path('login/', views.user_login,name='user-login'),\n path('logout/', views.user_logout,name = 'user-logout'),\n path('project-form/', views.project_form_view, name = 'project-form'),\n path('coordinator-projects/', views.coordinator_home, name = 'coordinator-home'),\n path('mentor-approval//', views.mentor_approval, name= 'mentor-approval'),\n path('hod-approval//', views.hod_approval, name= 'hod-approval'),\n path('project//edit/', views.ProjectUpdateView.as_view(), name='project_edit'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"project3/minor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"17225693","text":"#!/usr/bin/env python\n\n\nfrom pyphylogenomics import BLAST\nimport sys\n\n\nblast_output = sys.argv[1].strip()\nmodel_genome = sys.argv[2].strip()\noutput_file = sys.argv[3].strip()\nspecies_name = sys.argv[4].strip()\n\nBLAST.blastParser(blast_output, model_genome, output_file, species_name)\n","sub_path":"code/parse_blast.py","file_name":"parse_blast.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"577499697","text":"import sys\nimport math\n\nclass ProcessBar:\n\tdef __init__(self, TargetValue, barWidth = 80, barBody = '='):\n\t\tself.currentValue = 0\n\t\t\n\t\tif len(barBody) == 1:\n\t\t\tself.barBody = barBody\n\t\telse:\n\t\t\traise Exception(\"The bar body should be build with a single halfwidth character.\")\n\t\t\n\t\ttry:\n\t\t\tself.barWidth = int(barWidth)\n\t\texcept ValueError:\n\t\t\traise Exception(\"The process bar width should be an integer.\")\n\t\t\t\n\t\ttry:\n\t\t\tself.TargetValue = float(TargetValue)\n\t\texcept ValueError:\n\t\t\traise Exception(\"The target value should be an integer.\")\n\t\t\n\n\tdef start(self):\n\t\tsys.stdout.write(\"[%s]\" % (\" \" * self.barWidth))\n\t\tsys.stdout.flush()\n\t\tsys.stdout.write(\"\\b\" * (self.barWidth+1)) # return to start of line, after '['\n\n\tdef process(self,nowProcess):\n\t\t# Draw the progress bar\n\t\tprecurrentValue = self.currentValue\n\t\tself.currentValue = int(math.floor((nowProcess/self.TargetValue) * self.barWidth))\n\t\tsys.stdout.write(self.barBody * (self.currentValue - precurrentValue))\n\t\tsys.stdout.flush()\n\n\tdef end(self):\n\t\t# End the progress bar\n\t\tif self.currentValue != self.TargetValue:\n\t\t\tsys.stdout.write(\"=\" * (self.barWidth - self.currentValue))\n\t\t\tsys.stdout.flush()\n\t\tsys.stdout.write(\"\\n\")\n","sub_path":"ProcessBar.py","file_name":"ProcessBar.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"624121425","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\n# 单例模式\nclass Singleton(object):\n __instance = None\n\n def __new__(cls, age, name):\n if not cls.__instance:\n cls.__instance = object.__new__(cls)\n return cls.__instance\n\n\na = Singleton(18, 'dg')\nb = Singleton(8, 'ac')\na.age = 19\nprint(b.age)\n","sub_path":"singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"372661266","text":"import os\nimport sys\nimport time\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom pylab import *\nfrom sklearn.linear_model import LogisticRegression, Ridge\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\n\nsys.path.append('/gpfs/projects/gavingrp/dongmeic/sdm/python/models_new')\nimport model_utils_new as util\nfrom construct_model_matrices_random import ModelMatrixConstructor\n\n# model1 - model with only bioclimatic variables\n# model2 - model with bioclimatic variables, transformation and interactions\n# model3 - model with bioclimatic variables, transformation, interactions and beetle variables\n# model4 - add age and density to model3\n\nmodel = 'model2'\nDATA_DIR = '/gpfs/projects/gavingrp/dongmeic/sdm/data/Xy_random_split_data'\nIMG_DIR = '/gpfs/projects/gavingrp/dongmeic/beetle/output/plots/images/' + model\nOUT_DIR = '/gpfs/projects/gavingrp/dongmeic/beetle/output/tables/' + model\nREGULARIZER = 'l1'\nprint('Regularizer:', REGULARIZER)\n\ndef main():\n make_dirs()\n plt.rcParams['figure.figsize'] = 10, 8\n TEST = False\n dropBtl = False\n dropVgt = False\n matrix_constructor = ModelMatrixConstructor(DATA_DIR, TEST)\n matrix_constructor.construct_model_matrices()\n if model == 'model1':\n \ttest_vars = matrix_constructor.get_variables()\n else:\n \ttest_vars = matrix_constructor.add_beetle_vars()\n \tif model == 'model2':\n \t\tdropBtl = True\n \telif model != 'model5':\n \t\tdropVgt = True\n #test_vars = matrix_constructor.add_interactions()\n #test_vars = matrix_constructor.add_variations()\n for var in ['x', 'y', 'year']:\n \t\ttest_vars.append(var)\n test_vars = sorted(test_vars)\n data_sets = matrix_constructor.select_variables(test_vars)\n [[X_train, y_train], [X_valid, y_valid], [X_test, y_test]] = data_sets\n for (data_set, name) in zip(data_sets, 
['Train', 'Valid', 'Test']):\n \t\tprint_dims(data_set, name)\n util.print_percent_presence(y_train, 'y_train')\n util.print_percent_presence(y_valid, 'y_valid')\n util.print_percent_presence(y_test, 'y_test')\n y_train.columns=['btl_t']\n y_valid.columns=['btl_t']\n y_test.columns=['btl_t']\n full_train = X_train.copy()\n full_valid = X_valid.copy()\n full_test = X_test.copy()\n full_train['btl_t'] = y_train['btl_t']\n full_valid['btl_t'] = y_valid['btl_t']\n full_test['btl_t'] = y_test['btl_t']\n drop = ['x', 'y', 'year']\n if dropBtl:\n \tbtl_sum9 = [var for var in list(X_train) if 'btl' in var or 'sum9' in var]\n \tvgt = [var for var in list(X_train) if 'age' in var or 'density' in var]\n \tdrop += vgt\n \tdrop += btl_sum9\n \tdrop.append('vgt')\n if dropVgt:\n \tvgt = [var for var in list(X_train) if 'age' in var or 'density' in var]\n \tdrop += vgt\n X_train = X_train.drop(drop, axis=1)\n X_valid = X_valid.drop(drop, axis=1)\n X_test = X_test.drop(drop, axis=1)\n predictors = list(X_train)\n X_train, X_valid, X_test = scale_data(X_train, X_valid, X_test)\n y_train = y_train['btl_t'].values.reshape(-1)\n y_valid = y_valid['btl_t'].values.reshape(-1)\n y_test = y_test['btl_t'].values.reshape(-1)\n\n print('Fitting model...')\n BEST_C = get_best_C(X_train, y_train, X_valid, y_valid, predictors)\n logistic_clf = LogisticRegression(C=BEST_C, penalty=REGULARIZER, solver='saga', n_jobs=-1)\n logistic_clf.fit(X_train, y_train)\n preds = logistic_clf.predict(X_test)\n probs = logistic_clf.predict_proba(X_test)\n accuracy = sum(y_test == preds) / len(preds)\n print('Test accuracy:', accuracy) \n\n pred_ps = logistic_clf.predict_proba(X_test)\n pred_ps = np.array([p[1] for p in pred_ps])\n THRESHOLD = 0.5\n preds = get_predictions_at_threshold(pred_ps, THRESHOLD)\n best_threshold = threshold_plot(pred_ps, y_test);\n print('\\n\\nConfusion Matrices============================================')\n print('0.5 threshold:')\n cm = util.make_confusion_matrix(y_test, pred_ps, 
0.5)\n metrics = util.get_metrics(cm)\n print('\\n\\nOptimal threshold:', best_threshold['threshold'])\n cm = util.make_confusion_matrix(\n y_test, pred_ps, best_threshold['threshold'])\n metrics = util.get_metrics(cm)\n auc_metrics = util.get_auc(y_test, pred_ps, OUT_DIR)\n util.plot_roc(\n auc_metrics['fpr'], auc_metrics['tpr'], path='%s/roc.png' % IMG_DIR)\n coefs = pd.DataFrame(\n [[pred, coef]\n for pred, coef in zip(predictors, logistic_clf.coef_[0])],\n columns=['predictor', 'coef'])\n coefs['abs'] = np.abs(coefs.coef)\n coefs = coefs.sort_values('abs', ascending=False)\n coefs = coefs.drop(['abs'], axis=1)\n print(coefs)\n coefs.to_csv('%s/coefficients.csv' % OUT_DIR, index=False)\n print('\\n\\nModel intercept:', logistic_clf.intercept_)\n\n pred_ps_train = logistic_clf.predict_proba(X_train)\n pred_ps_train = np.array([p[1] for p in pred_ps_train])\n pred_ps_valid = logistic_clf.predict_proba(X_valid)\n pred_ps_valid = np.array([p[1] for p in pred_ps_valid])\n full_train['probs'] = pred_ps_train\n full_train['preds'] = get_predictions_at_threshold(\n pred_ps_train, best_threshold['threshold'])\n full_valid['probs'] = pred_ps_valid\n full_valid['preds'] = get_predictions_at_threshold(\n pred_ps_valid, best_threshold['threshold'])\n full_test['probs'] = pred_ps\n full_test['preds'] = get_predictions_at_threshold(\n pred_ps, best_threshold['threshold'])\n all_data = full_train.append(full_valid).append(full_test)\n all_data.index = range(all_data.shape[0])\n years = sorted(full_train.year.unique())\n df = all_data[['x', 'y', 'year', 'btl_t', 'probs', 'preds']]\n df.to_csv('%s/predictions.csv' % OUT_DIR, index=False)\n\n print('\\n\\nGenerating prediction plots==================================')\n for year in years:\n print(' Train...')\n make_actual_pred_and_error_matrices(\n full_train,\n year,\n plot=True,\n path='%s/pred_plot_train_%d.png' % (IMG_DIR, year))\n print(' Valid...')\n make_actual_pred_and_error_matrices(\n full_valid,\n year,\n plot=True,\n 
path='%s/pred_plot_valid_%d.png' % (IMG_DIR, year))\n print(' Test...')\n make_actual_pred_and_error_matrices(\n full_test,\n year,\n plot=True,\n path='%s/pred_plot_test_%d.png' % (IMG_DIR, year))\n print(' Combined probabilities...')\n make_actual_pred_and_error_matrices(\n all_data,\n year,\n pred_type='probs',\n plot=True,\n path='%s/prob_plot_all_%d.png' % (IMG_DIR, year))\n print('all done!')\n \ndef make_dirs():\n for d in [IMG_DIR, OUT_DIR]:\n if not os.path.exists(d):\n os.makedirs(d)\n \n \ndef print_dims(data_set, name):\n print('%s:\\n X: %r\\n y: %r'\n % (name, data_set[0].shape, data_set[1].shape))\n\n\ndef scale_data(X_train, X_valid, X_test):\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_valid = scaler.transform(X_valid)\n X_test = scaler.transform(X_test)\n return X_train, X_valid, X_test\n\ndef get_best_C(X_train, y_train, X_valid, y_valid, predictors):\n\t\tl_mods = []\n\t\tCs = np.logspace(-4, 0, 5)\n\t\tbest_C = np.nan\n\t\tbest_accuracy = 0\n\t\tt0 = time.time()\n\t\tbest_penalty = None\n\t\tfor C in Cs:\n\t\t\t\tprint('Testing C =', C)\n\t\t\t\tfor penalty in [REGULARIZER]:\n\t\t\t\t\t\tprint(' %s:' % penalty, end=' ')\n\t\t\t\t\t\tlogistic_clf = LogisticRegression(C=C, penalty=penalty, solver='saga', n_jobs=-1)\n\t\t\t\t\t\tlogistic_clf.fit(X_train, y_train)\n\t\t\t\t\t\tpreds = logistic_clf.predict(X_valid)\n\t\t\t\t\t\taccuracy = sum(y_valid == preds) / len(preds)\n\t\t\t\t\t\ta = [[pred, coef] for pred, coef in zip(predictors, logistic_clf.coef_[0])]\n\t\t\t\t\t\tsig_preds = []\n\t\t\t\t\t\tsig_coefs = []\n\t\t\t\t\t\tfor pred, coef in a:\n\t\t\t\t\t\t\t\tif abs(coef) > 0:\n\t\t\t\t\t\t\t\t\t\tsig_preds.append(pred)\n\t\t\t\t\t\t\t\t\t\tsig_coefs.append(coef)\n\t\t\t\t\t\tprint([sig_preds[i] for i in argsort(np.abs(sig_coefs))[::-1]])\n\t\t\t\t\t\tprint([sig_coefs[i] for i in argsort(np.abs(sig_coefs))[::-1]])\t\t\t\t\t\t\n\t\t\t\t\t\tif (accuracy > best_accuracy):\n\t\t\t\t\t\t\t\tbest_C = 
C\n\t\t\t\t\t\t\t\tbest_accuaracy = accuracy\n\t\t\t\t\t\t\t\tbest_penalty = penalty\n\t\t\t\t\t\tprint('Validation accuracy:', round(accuracy, 4))\n\t\t\t\t\t\tl_mods.append(accuracy)\n\t\t\t\t\t\tprint('Elapsed time: %.2f minutes' % ((time.time() - t0) / 60))\n\t\tprint(l_mods)\n\t\treturn best_C\n\ndef get_predictions_at_threshold(pred_ps, threshold):\n return 1 * (pred_ps >= threshold)\n\n\ndef threshold_plot(pred_ps, targets, plot=False):\n thresholds = np.linspace(0, 1, 500)\n accuracies = []\n n = len(pred_ps)\n for threshold in thresholds:\n preds = get_predictions_at_threshold(pred_ps, threshold)\n accuracies.append((preds == targets).sum() / n)\n if plot:\n plt.plot(thresholds, accuracies);\n optimal_threshold = thresholds[np.argmax(accuracies)]\n optimal_accuracy = max(accuracies)\n if plot:\n plt.plot([optimal_threshold, optimal_threshold],\n [min(accuracies), max(accuracies)],\n 'r')\n plt.plot([0, 1], [optimal_accuracy, optimal_accuracy], 'r')\n plt.xlabel('Threshold for predicting \"Renewal\"')\n plt.ylabel('Accuracy')\n return {'threshold': optimal_threshold, 'accuracy': optimal_accuracy}\n\n\ndef pred_plot(actual_matrix, pred_matrix, error_matrix, year, path):\n fig = plt.figure()\n plt.subplot(131)\n imshow(np.rot90(actual_matrix));\n plt.title('%d Actual' % year);\n plt.subplot(132)\n imshow(np.rot90(pred_matrix));\n plt.title('%d Predicted' % year);\n plt.subplot(133)\n imshow(np.rot90(error_matrix));\n plt.title('%d Error' % year);\n fig.savefig(path)\n\n\ndef make_actual_pred_and_error_matrices(\n data, year, pred_type='preds', plot=False, path=''):\n data_year = data.loc[data.year == year, :]\n actual_matrix = util.column2matrix(data_year, 'btl_t')\n pred_matrix = util.column2matrix(data_year, pred_type)\n error_matrix = pred_matrix - actual_matrix\n if plot:\n pred_plot(actual_matrix, pred_matrix, error_matrix, year, path)\n return actual_matrix, pred_matrix, error_matrix\n \nif __name__ == '__main__':\n 
main()\n","sub_path":"python/models_new/logistic_model_random.py","file_name":"logistic_model_random.py","file_ext":"py","file_size_in_byte":10334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"195049400","text":"import random\r\nprint(\"Number guessing game\")\r\n\r\nrandomNum=random.randint(1,10)\r\nchances=0\r\nprint(\"Guess a number between 1 and 10\")\r\nwhile chances<5: \r\n guess=int(input(\"Enter your guess: \"))\r\n if guess==randomNum:\r\n print(\"Congratulations you won\")\r\n break\r\n elif guess 100:\n output = '亲,你走错片场了吧!输入的数字不在范围内'\n mb.showinfo('Hint:', output)\n elif guess == number:\n output = '恭喜,猜数正确'\n mb.showinfo('欢迎来到数字竞猜游戏:', output)\n break\n elif guess < number:\n low = guess\n output = '猜测数字比系统小,新范围'+str(low)+'--' + str(height)\n low = guess\n mb.showinfo('欢迎来到数字竞猜游戏', output)\n else:\n height = guess\n output = '猜测数字比系统大,新范围' + str(low)+'--' + str(height)\n mb.showinfo('欢迎来到数字竞猜游戏', output)\nprint('结束')\n\n","sub_path":"python/guess_number/guess_number3.py","file_name":"guess_number3.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"581831639","text":"from die import Die\r\nimport matplotlib.pyplot as plt\r\n#创建一个色子\r\ndie = Die()\r\nresults = [] #存储摇出来的结果\r\nfor roll_num in range(1000):\r\n result = die.roll()\r\n results.append(result)\r\n#分析结果\r\nfrequencies = []\r\nfor value in range(1,die.num_sides+1): #左闭右开\r\n frequency = results.count(value)\r\n frequencies.append(frequency)\r\nprint(results)\r\nprint(frequencies)\r\n#可视化结果\r\n\r\nplt.bar(range(1,7),frequencies,align='center')\r\nplt.xlabel(\"number\")\r\nplt.ylabel(\"Frequncy\")\r\n'''\r\n前边设置的x、y值其实就代表了不同柱子在图形中的位置(坐标),通过for循环找到每一个x、y值的相应坐标——a、b,\r\n再使用plt.text在对应位置添文字说明来生成相应的数字标签,而for循环也保证了每一个柱子都有标签。\r\n其中,a, b+0.05表示在每一柱子对应x值、y值上方0.05处标注文字说明, \r\n'%.0f' % b,代表标注的文字,即每个柱子对应的y值, ha='center', va= 'bottom'代表horizontalalignment(水平对齐)、\r\nverticalalignment(垂直对齐)的方式,fontsize则是文字大小。\r\n链接:https://www.jianshu.com/p/5ae17ace7984\r\n'''\r\nfor x,y in zip(range(1,7),frequencies): #设置数字标签\r\n plt.text(x,y+0.05,'%.0f'%y,ha = 'center',va='bottom')\r\nplt.grid()\r\nplt.show()","sub_path":"test/PYTHON/matplotlib/plt_bar.py","file_name":"plt_bar.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"224413922","text":"import logging\n\nfrom flask import Flask, escape, request, jsonify, abort\nfrom jjigae.naver import Term, parallel_lookup\nfrom jjigae.prestudy import extract\nfrom jjigae.common import COMMON_WORDS\n\napp = Flask(__name__)\napp.config[\"JSON_AS_ASCII\"] = False\n\n\n@app.route(\"/prestudy\", methods=[\"POST\"])\ndef prestudy():\n args = request.get_json()\n if \"text\" in args and isinstance(args[\"text\"], str):\n text = args[\"text\"]\n words = extract(text)\n limit = (\n args[\"limit_to_common_words\"] if \"limit_to_common_words\" in args else None\n )\n limit = limit if limit < len(COMMON_WORDS) else None\n return jsonify(\n {\n k: v.__dict__\n for (k, v) in parallel_lookup(\n words, restrict_to_common_words=limit\n ).items()\n }\n )\n else:\n return abort(jsonify({\"error\": \"Need text (str) in JSON body\"}))\n\n\n@app.route(\"/lookup\", methods=[\"POST\"])\ndef lookup():\n args = request.get_json()\n if \"word\" in args and isinstance(args[\"word\"], str):\n word = args[\"word\"]\n try:\n term = Term.lookup(word)\n if term:\n return jsonify(term.__dict__)\n else:\n return jsonify({\"not_found\": True})\n except Exception as e:\n app.logger.error(\"Error processing %s: %s\", word, str(e))\n return abort(jsonify({\"word\": word, \"error\": str(e)}))\n\n elif \"words\" in args and isinstance(args[\"words\"], list):\n words = args[\"words\"]\n try:\n return jsonify({k: v.__dict__ for (k, v) in parallel_lookup(words).items()})\n except Exception as e:\n app.logger.error(\"Error processing %s: %s\", str(words), str(e))\n return abort(jsonify({\"words\": words, \"error\": str(e)}))\n else:\n return abort(\n jsonify({\"error\": \"Need word (str) or words (list[str]) in JSON body\"})\n )\n\n\n@app.route(\"/\")\ndef status():\n return jsonify(status=\"ok\")\n\n\nif __name__ != \"__main__\":\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n 
app.logger.setLevel(gunicorn_logger.level)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"37008453","text":"#\n# Copyright © 2021 Uncharted Software Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\n\nimport pandas as pd\nfrom d3m import container, utils\nfrom d3m.metadata import base as metadata_base, hyperparams\nfrom d3m.primitive_interfaces import base, transformer\nfrom distil.primitives import utils as distil_utils\nfrom distil.primitives.utils import SINGLETON_INDICATOR, CATEGORICALS\nfrom distil.utils import CYTHON_DEP\nimport version\n\n__all__ = (\"ReplaceSingletonsPrimitive\",)\n\nlogger = logging.getLogger(__name__)\n\n\nclass Hyperparams(hyperparams.Hyperparams):\n use_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=[\n \"https://metadata.datadrivendiscovery.org/types/ControlParameter\"\n ],\n description=\"A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.\",\n )\n\n\nclass ReplaceSingletonsPrimitive(\n transformer.TransformerPrimitiveBase[\n container.DataFrame, container.DataFrame, Hyperparams\n ]\n):\n \"\"\"\n Replaces category members with a count of one with a shared singleton token value. 
Currently applies to columns\n with semantic type Categorical, Ordinal or DateTime.\n \"\"\"\n\n metadata = metadata_base.PrimitiveMetadata(\n {\n \"id\": \"7cacc8b6-85ad-4c8f-9f75-360e0faee2b8\",\n \"version\": version.__version__,\n \"name\": \"Replace singeltons\",\n \"python_path\": \"d3m.primitives.data_transformation.replace_singletons.DistilReplaceSingletons\",\n \"source\": {\n \"name\": \"Distil\",\n \"contact\": \"mailto:cbethune@uncharted.software\",\n \"uris\": [\n \"https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/replace_singletons.py\",\n \"https://github.com/uncharted-distil/distil-primitives\",\n ],\n },\n \"installation\": [\n CYTHON_DEP,\n {\n \"type\": metadata_base.PrimitiveInstallationType.PIP,\n \"package_uri\": \"git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives\".format(\n git_commit=utils.current_git_commit(os.path.dirname(__file__)),\n ),\n },\n ],\n \"algorithm_types\": [\n metadata_base.PrimitiveAlgorithmType.ENCODE_BINARY,\n ],\n \"primitive_family\": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,\n },\n )\n\n def produce(\n self,\n *,\n inputs: container.DataFrame,\n timeout: float = None,\n iterations: int = None,\n ) -> base.CallResult[container.DataFrame]:\n logger.debug(f\"Running {__name__}\")\n\n # set values that only occur once to a special token\n outputs = inputs.copy()\n\n # determine columns to operate on\n cols = distil_utils.get_operating_columns(\n inputs, self.hyperparams[\"use_columns\"], CATEGORICALS\n )\n\n for c in cols:\n vcs = pd.value_counts(list(inputs.iloc[:, c]))\n singletons = set(vcs[vcs == 1].index)\n if singletons:\n mask = outputs.iloc[:, c].isin(singletons)\n outputs.loc[mask, outputs.columns[c]] = SINGLETON_INDICATOR\n\n logger.debug(f\"\\n{outputs}\")\n\n return 
base.CallResult(outputs)\n","sub_path":"distil/primitives/replace_singletons.py","file_name":"replace_singletons.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"90097293","text":"# Definition for binary tree with next pointer.\n# class TreeLinkNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n# self.next = None\n\nclass Solution:\n # @param root, a tree link node\n # @return nothing\n def connect(self, root):\n if not root:\n return\n nodeQueue = [root]\n while nodeQueue:\n prevNode = None\n for node in nodeQueue:\n if not prevNode:\n prevNode = node\n continue\n prevNode.next = node\n prevNode = node\n \n nodeQueue = [childNode for node in nodeQueue for childNode in [node.left, node.right] if childNode]","sub_path":"LeetCode/BFS/116_PopulatingNextRightPointersInEachNode.py","file_name":"116_PopulatingNextRightPointersInEachNode.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"214130854","text":"from os import environ\n\nfrom flask import current_app\n\n\ndef get_default_value(name):\n return {\n 'YOTI_APPLICATION_ID': environ.get('YOTI_APPLICATION_ID'),\n 'YOTI_CLIENT_SDK_ID': environ.get('YOTI_CLIENT_SDK_ID'),\n 'YOTI_KEY_FILE_PATH': environ.get('YOTI_KEY_FILE_PATH'),\n 'YOTI_REDIRECT_TO': 'flask_yoti.profile',\n 'YOTI_LOGIN_VIEW': 'flask_yoti.login',\n }.get(name)\n\n\ndef get_config_value(name):\n try:\n config = current_app.config\n parameter = config.get(name, get_default_value(name))\n except RuntimeError:\n parameter = get_default_value(name)\n\n if parameter is None:\n raise RuntimeError(\n 'Required parameter \"{0}\" is not configured'.format(name)\n )\n return parameter\n","sub_path":"plugins/flask_yoti/flask_yoti/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"157850494","text":"from feature_creator import FeatureCreator\nfrom train_one_prediction import TrainOnePrediction\nfrom weight import Weight\n\n\nclass OnlineLearning:\n\n def __init__(self, feature, data, flabel):\n self.feature = feature\n self.data = data\n self.phi = {} # Φファイ\n self.label = 0\n self.flabel = flabel\n self.weight = {}\n self.iteration = 1000\n\n def online_learning(self):\n count = 0\n\n \"\"\" initinalize \"\"\"\n for value in self.feature.values():\n split_word = value.split(' ')\n [self.weight.update({self.flabel + word: 0}) for word in split_word]\n cfeature = FeatureCreator()\n\n \"\"\" update weight \"\"\"\n while self.iteration >= count:\n count = count + 1\n for key, value in self.feature.items():\n self.phi = cfeature.create(value, self.data, self.flabel)\n self.label = TrainOnePrediction(self.weight, self.phi)\n if self.label is not key:\n update_weight = Weight(self.weight, self.phi, key)\n self.weight = update_weight.update()\n","sub_path":"NLP_programing/chapter3/online_learning.py","file_name":"online_learning.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"356247166","text":"# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport os.path\nimport re\nimport shutil\nimport tempfile\n\nimport git\n\nfrom dockstack import docker\nfrom dockstack import exceptions\n\n\nenv_pattern = re.compile(\"\\$\\{\\w+\\}\")\n\n\nclass DockerContainer(object):\n \"\"\"Class to create a docker container.\"\"\"\n\n log = logging.getLogger(__name__)\n\n def __init__(self, service, common_conf, service_conf,\n config_dir, noop=False):\n \"\"\"Initialize with a given configuration.\n\n :param service: Name of the service.\n :param common_conf: Dictionary with config values from common YAML.\n :param service_conf: Dictionary with config values from service YAML.\n :param config_dir: Directory with configuration files.\n :param noop: Do not actually execute commands when requested.\n :param quiet: Do things more quietly.\n\n :raises: InvalidConfig on config errors.\n \"\"\"\n if 'common' not in common_conf:\n raise exceptions.InvalidConfig(\"Expecting a common config.\")\n if 'service' not in service_conf:\n raise exceptions.InvalidConfig(\"Expecting a service config.\")\n\n self.common_conf = common_conf['common']\n self.service_conf = service_conf['service']\n self.service_name = service\n self.noop = noop\n self.volumes = [] # Container volumes\n\n # Environment variables. 
We automatically supply the config dir.\n self.envs = {}\n self.envs['dockstack_config'] = config_dir\n\n # Set our working directory. All files will be added here and all\n # prep commands will be run from here, as well.\n self.workdir = self.service_conf.get('workdir', '/dockstack')\n\n # Script to hold our commands\n self.command_script = \"%s_commands.sh\" % self.service_name\n\n # Actual command to execute. This could change if there is only\n # a single command.\n self.command = os.path.join(self.workdir, self.command_script)\n\n self.docker = docker.Docker(noop)\n\n def _add_volumes(self, conf):\n \"\"\"Verify volumes have a source and destination.\"\"\"\n if 'volumes' not in conf:\n return\n for volume in conf['volumes']:\n if volume.find(':') == -1:\n raise exceptions.InvalidConfig(\n \"Invalid volume '%s'. Format is SRC:DST\" % volume)\n src, dst = volume.split(':')\n if not src or not dst:\n raise exceptions.InvalidConfig(\n \"Invalid volume '%s'. Format is SRC:DST\" % volume)\n\n # Volumes may contain environment variable references\n # that we need to handle here.\n src = self._replace_envs(src)\n dst = self._replace_envs(dst)\n self.volumes.append((src, dst))\n\n def _build_image(self):\n \"\"\"Build a customized image from the base image.\"\"\"\n base_image = self._get_image()\n new_image = self._get_image_name()\n\n dockerfile = []\n dockerfile.append(\"FROM %s\" % base_image)\n\n # Set build environment variables\n for env, val in self.envs.items():\n dockerfile.append(\"ENV %s %s\" % (env, val))\n\n if 'expose' in self.service_conf:\n for port in self.service_conf['expose']:\n dockerfile.append(\"EXPOSE %s\" % port)\n\n dockerfile.append(\"WORKDIR %s\" % self.workdir)\n\n try:\n tempd = tempfile.mkdtemp()\n\n if len(self.service_conf['commands']) == 1:\n self.command = self.service_conf['commands'][0]\n else:\n self._commands_as_script(\n self.service_conf['commands'],\n os.path.join(tempd, self.command_script))\n\n dockerfile.append(\"ADD %s 
%s/\" % (self.command_script,\n self.workdir))\n\n # Add all files to the workdir location.\n # NOTE: The files may contain env var references, but docker\n # itself should handle those during build since we define\n # them above with ENV. However, our file copy operation below\n # must substitute them.\n if 'add' in self.service_conf:\n for entry in self.service_conf['add']:\n if entry.find(':') > 0:\n local_file, container_file = entry.split(':')\n else:\n local_file = entry\n container_file = None\n\n basename = os.path.basename(local_file)\n\n # ADD requires the file to be relative to the source dir\n shutil.copy(self._replace_envs(local_file), tempd)\n\n if container_file:\n dockerfile.append(\"ADD %s %s/%s\" % (basename,\n self.workdir,\n container_file))\n else:\n # NOTE: The trailing slash on workdir is needed\n dockerfile.append(\"ADD %s %s/\" % (basename,\n self.workdir))\n\n # Run prep commands _after_ all files have been added.\n if 'prep' in self.service_conf:\n for command in self.service_conf['prep']:\n dockerfile.append(\"RUN %s\" % command)\n\n dockerfile.extend(self._handle_sources(tempd))\n dockerfile = '\\n'.join(dockerfile)\n\n with open(os.path.join(tempd, 'Dockerfile'), 'w') as f:\n f.write(dockerfile)\n\n self.docker.build(tempd, new_image)\n\n finally:\n shutil.rmtree(tempd)\n self.log.debug(\"Dockerfile:\\n-------\\n%s\\n--------\" % dockerfile)\n\n def _commands_as_script(self, commands, script_name):\n \"\"\"Combine commands into a master script file.\n\n :param commands: A list of commands to execute.\n :param script_name: Full path to the script to build.\n \"\"\"\n with open(script_name, \"w\") as f:\n f.write(\"#!/bin/sh\\n\")\n f.write(\"\\n\".join(commands))\n f.write(\"\\n\")\n os.chmod(script_name, 0o700)\n\n def _container_exists(self, name):\n containers = self.docker.ps(show_all=True)\n for container in containers:\n if name in container['names']:\n return True\n return False\n\n def _get_envs(self):\n \"\"\"Read environment 
variables from the configs.\n\n An environment variable defined in the service config takes precedence\n over one defined in the common config.\n \"\"\"\n if 'environment' in self.common_conf:\n for k, v in self.common_conf['environment'].iteritems():\n self.envs[k] = v\n if 'environment' in self.service_conf:\n for k, v in self.service_conf['environment'].iteritems():\n self.envs[k] = v\n\n def _get_image(self):\n \"\"\"Determine the docker image to use.\"\"\"\n if 'image' in self.service_conf:\n return self.service_conf['image']\n return self.common_conf['image']\n\n def _get_image_name(self):\n return \"dockstack/%s\" % self.service_name\n\n def _handle_sources(self, build_dir):\n \"\"\"Create the build commands for 'sources' entries.\n\n We have to put the sources into the docker build directory, as that is\n a requirement for the ADD instructions. We copy them there, if needed,\n but try not to copy stuff we don't need.\n\n :param build_dir: The docker build directory.\n\n :returns: A list of commands to use for the docker build.\n \"\"\"\n commands = []\n\n ignored = ('*.pyc', '.tox', '.testrepository')\n\n if 'sources' not in self.service_conf:\n return commands\n\n for source in self.service_conf['sources']:\n source = self._replace_envs(source)\n\n # Git repos\n if source[0:6] == \"git://\":\n base = os.path.basename(source)\n repo_name = os.path.splitext(base)[0]\n git_dir = os.path.join(build_dir, repo_name)\n self.log.debug(\"GIT CLONE: %s TO %s\" % (source, git_dir))\n if not self.noop:\n git.Repo.clone_from(source, git_dir)\n commands.append(\"ADD %s %s/%s\" %\n (repo_name, self.workdir, repo_name))\n commands.append(\"WORKDIR %s/%s\" % (self.workdir, repo_name))\n commands.append(\"RUN pip install -U -r requirements.txt\")\n commands.append(\"RUN pip install -U -r test-requirements.txt\")\n commands.append(\"RUN python setup.py install\")\n\n # Local directories\n elif os.path.isdir(source):\n base = os.path.basename(source)\n if not self.noop:\n 
shutil.copytree(source,\n os.path.join(build_dir, base),\n ignore=shutil.ignore_patterns(*ignored))\n commands.append(\"ADD %s %s/%s\" % (base, self.workdir, base))\n commands.append(\"WORKDIR %s/%s\" % (self.workdir, base))\n commands.append(\"RUN pip install -U -r requirements.txt\")\n commands.append(\"RUN pip install -U -r test-requirements.txt\")\n commands.append(\"RUN python setup.py install\")\n else:\n raise exceptions.InvalidConfig(\n \"Source '%s' is not valid.\" % source)\n\n return commands\n\n def _image_exists(self, image):\n \"\"\"Check if a docker image exists.\n\n If the 'history' command fails, we assume it doesn't exist.\n\n :param image: Image name to check.\n :returns: True if the image exists, False otherwise.\n \"\"\"\n try:\n self.docker.history(image)\n except Exception:\n return False\n return True\n\n def _replace_envs(self, string):\n \"\"\"Substitute any env variables within a string with their value.\n\n Look for any \"${env}\" substrings and replaces it with the value of\n the 'env' variable.\n\n :param string: The string in which to do substitutions.\n\n :returns: The result string.\n :raises: InvalidConfig if the environment variable is not defined.\n \"\"\"\n new_string = string\n iterator = env_pattern.finditer(string)\n for match in iterator:\n begin, end = match.span()\n env = string[begin+2:end-1]\n if env not in self.envs:\n raise exceptions.InvalidConfig(\n \"Variable '%s' not defined\" % env)\n new_string = new_string.replace(string[begin:end], self.envs[env])\n return new_string\n\n def _validate_configs(self):\n \"\"\"Validate the common and service configurations.\n\n This will validate the configuration values needed to use docker.\n\n :raises: InvalidConfig on config errors.\n \"\"\"\n required_common = ['image']\n\n # Check for missing values.\n for value in required_common:\n if value not in self.common_conf:\n raise exceptions.InvalidConfig(\"Missing '%s' value\" % value)\n\n self._get_envs()\n 
self._add_volumes(self.common_conf)\n self._add_volumes(self.service_conf)\n\n def start(self, rebuild=False):\n \"\"\"Start the docker container.\n\n The configurations from both the common YAML and service YAML will\n be used to configure and start the docker container. The container\n will be named after the service name for convenience.\n\n Each service container will have a directory from the host machine\n mounted as a volume within each container (the /data directory of\n the container). This can be used for data persistence, such as logs.\n\n :param rebuild: Rebuild the container if it exists.\n\n :raises: InvalidConfig on config errors.\n \"\"\"\n self._validate_configs()\n image = self._get_image_name()\n description = self.service_conf.get('description', self.service_name)\n\n # If container already exists, just start it.\n container_exists = self._container_exists(self.service_name)\n if container_exists and not rebuild:\n self.log.info(\"START - %s\" % description)\n self.docker.start(self.service_name)\n return\n\n ports = []\n if 'ports' in self.service_conf:\n for port_mapping in self.service_conf['ports']:\n host_port, container_port = port_mapping.split(':')\n ports.append((host_port, container_port))\n\n links = []\n if 'links' in self.service_conf:\n for link in self.service_conf['links']:\n links.append((link, link))\n\n daemon = True\n if 'daemon' in self.service_conf:\n daemon = self.service_conf['daemon']\n\n image_exists = self._image_exists(image)\n\n if rebuild and image_exists:\n if container_exists:\n self.docker.rm(self.service_name)\n self.docker.rmi(image)\n self._build_image()\n elif not image_exists:\n self._build_image()\n\n self.log.info(\"RUN - %s\" % description)\n self.docker.run(image,\n self.command,\n name=self.service_name,\n links=links,\n ports=ports,\n volumes=self.volumes,\n 
daemon=daemon)\n","sub_path":"dockstack/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":14292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"100655996","text":"##############################################################\n# Libraries\n##############################################################\nimport numpy as np\nimport csv\nimport random\n\n\n##############################################################\n# Variable Definition\n##############################################################\n\n\n##############################################################\n# Class Definition\n##############################################################\n# Node Class\n# Create a Node and its data\n# API Operation | Description\n# Node(track_id) | Create Node\n# str | Print track_id\nclass Node:\n # Initialize a node\n # Node(track_id)\n # Input: track_id\n # Output: None\n def __init__(self, track_id, track_name=None, artist=None, album=None, file_location=None, next=None):\n self.track_id = track_id\n self.track_name = track_name\n self.artist = artist\n self.album = album\n self.file_location = file_location\n self.next = next\n\n # Print track_id\n # str()\n # input: None\n # Output: string of track_id\n def __str__(self):\n return str(self.track_id)\n\n\n# LinkedList class\n# Create a linked list\n# API Operation | Description\n# LinkedList(head) | Create a LinkedList by initialize the head\n# len | Print the length of the LinkedList\n# insert_to_front(data) | Create a data Node and set to the head\n# find(data) | Find Node with desired data\n# append(data) | Append new data to the end\n# delete(data) | Delete Node with desired data\n# insert_after(pre_data, data) | Insert a data Node right after the current one\n# print_list() | Print the whole LinkedList\nclass LinkedList(object):\n # Initialize a LinkedList\n # LinkedList(data)\n # Input: data\n # Output: None\n def __init__(self, head):\n self.head = head\n\n # print LinkedList length\n # len()\n # Input: None\n # Output: length of the LinkedList\n def __len__(self):\n current = self.head\n counter = 0\n while current is not None:\n counter 
+= 1\n current = current.next\n return counter\n\n # Find the nth linked Node\n # find_nth(n)\n # Input: n as integer\n # Output nth's track_id\n def find_nth(self, n):\n current = self.head\n counter = 0\n track_id = 0\n if n <= 0:\n print(\"Invalid Input in Finding Location\")\n exit(1)\n while counter < n:\n counter += 1\n current = current.next\n track_id = current.track_id\n if current.next is None:\n print(\"Not Found in the List\")\n return track_id\n return track_id\n\n # insert to the front\n # insert_to_front(data)\n # Input: data\n # Output: newly created node\n def insert_to_front(self, track_id, track_name=None, artist=None, album=None, file_location=None):\n if track_id is None:\n return None\n node = Node(track_id=track_id, track_name=track_name, artist=artist, album=album, file_location=file_location,\n next=self.head)\n self.head = node\n return node\n\n # Find Node with desired track_id\n # find(track_id)\n # Input: track_id\n # Output: None\n def find(self, track_id, feedback=False):\n if track_id is None:\n return None\n curr_node = self.head\n while curr_node is not None:\n if curr_node.track_id == track_id:\n return curr_node\n curr_node = curr_node.next\n # Determine Feedback\n if feedback:\n return curr_node\n else:\n return None\n\n # Append new data to the end\n # append(data)\n # Input: data\n # Output: None\n def append(self, track_id, track_name=None, artist=None, album=None, file_location=None):\n if track_id is None:\n return None\n node = Node(track_id=track_id, track_name=track_name, artist=artist, album=album, file_location=file_location)\n if self.head is None:\n self.head = node\n return node\n curr_node = self.head\n while curr_node.next is not None:\n curr_node = curr_node.next\n curr_node.next = node\n return node\n\n # Delete Node if data matches\n # delete(data)\n # Input: data\n # Output: None\n def delete(self, track_id):\n if track_id is None:\n return None\n if self.head is None:\n return None\n if self.head.track_id == 
track_id:\n self.head = self.head.next\n return\n prev_node = self.head\n curr_node = self.head.next\n while curr_node is not None:\n if curr_node.track_id == track_id:\n prev_node.next = curr_node.next\n return\n else:\n prev_node = curr_node\n curr_node = curr_node.next\n\n # Insert after certain node\n # insert_after(pre_data, data)\n # Input: pre_data, data\n # Output: None\n def insert_after(self, pre_track_id, track_id, track_name=None, artist=None, album=None, file_location=None):\n new_node = Node(track_id=track_id, track_name=track_name, artist=artist, album=album,\n file_location=file_location)\n pre_node = self.find(pre_track_id, feedback=True)\n new_node.next = pre_node.next\n pre_node.next = new_node\n\n # Print the LinkedList\n # print_list()\n # Input: None\n # Output: None\n def print_list(self):\n temp = self.head\n counter = 1\n while temp:\n print(counter, \"||\", temp.track_id, \"|\", temp.track_name, \"| by:\", temp.artist, \"| in:\", temp.album)\n temp = temp.next\n counter += 1\n\n # str()\n # creates a string with all the data from the list\n # inputs: none\n # returns: list in the form of a string\n def __str__(self):\n s = ''\n cur = self.head\n if cur is None:\n s += \"EMPTY\"\n while cur is not None:\n s += str(cur.track_id) + ' '\n cur = cur.next\n return s\n\n # Return shape\n # shape()\n # Input: None\n # Output: [length, height]\n def shape(self):\n return np.asarray([self.__len__(), 5])\n\n\n# HashTable Class\n# Create a Node and its data\n# API Operation | Description\n# Node(track_id) | Create Node\n# insert(item) | Insert an item\n# hash_function(key) | Hashing Function\n# str | Print track_id\nclass HashTable:\n # __init___(length)\n # constructor that makes an empty HashTable with length\n # inputs: numElements which is number of elements in Hash_Table\n # returns: none\n def __init__(self, length):\n self.length = length\n self.table = [None] * self.length\n index = 0\n for item in self.table:\n self.table[index] = 
LinkedList(None)\n index += 1\n self.n_data = 0\n\n # _hashFunc\n # hashing function\n # inputs: key\n # returns: location in hash table\n def hash_function(self, key):\n return key.track_id\n\n # insert(item)\n # inserts an item in the hash table\n # inputs: item - to insert\n # returns: none\n def insert(self, item):\n loc = int(self.hash_function(item))\n self.table[loc].append(item)\n\n # find(loc)\n # Find item at location\n # input: loc\n # output: item at loc\n def find(self, location):\n return self.table[location].head\n\n # str()\n # creates a string with all the data from the table\n # inputs: none\n # returns: table in the form of a string\n def __str__(self):\n s = ''\n i = 0\n for x in self.table:\n s += \"Data at index \" + str(i) + \" is \\n\"\n s += str(self.table[i])\n s += \"\\n\"\n i = i + 1\n return s\n\n # __getitem__(item)\n # Obtain Linked Note at location\n # input: location\n # output: node at location\n def __getitem__(self, item):\n if 0 < item < self.length:\n return self.table[item].head.track_id\n else:\n print(\"Error(900): Index Out of Range or not in range\")\n exit(900)\n\n\n# Class Library\n# Create library\n# API Operation | Description\n# Library(playlist_length) | Create Library from csv\n# add_track(playlist, add_track_id, add_track_location) | Add track to a location in playlist\n# print(playlist) | Print playlist\nclass Library:\n def __init__(self, playlist_length, file_name=\"raw_track.csv\", repeat=False):\n # Load Library\n self.file_name = file_name\n self.data_range = 175 # 155321\n self.library = HashTable(self.data_range)\n with open(self.file_name) as csv_file:\n tracks = csv.reader(csv_file, dialect='excel')\n index = 0\n for row in tracks:\n if index > 0:\n new_node = Node(track_id=row[0],\n track_name=row[37],\n artist=row[5],\n album=row[2],\n file_location=row[26])\n self.library.insert(item=new_node)\n index += 1\n # Check Playlist Length\n self.playlist_length = playlist_length\n if self.playlist_length > 
self.data_range:\n print(\"## Length Out of Range ##\")\n self.playlist_length = self.data_range\n else:\n self.playlist_length = self.playlist_length\n # Generate Playlist\n random_list = np.zeros(self.playlist_length)\n index = 0\n while index <= self.playlist_length - 1:\n temp_num = random.randint(0, self.data_range - 1)\n # Check if exists\n if not repeat:\n exist = 0\n for index2 in range(0, self.playlist_length):\n if temp_num == int(random_list[index2]):\n exist = 1\n if exist == 0 and self.library.find(temp_num) is not None:\n random_list[index] = temp_num\n index += 1\n else:\n index = index\n else:\n random_list[index] = temp_num\n index += 1\n # Push playlist info\n self.playlist = LinkedList(None)\n for item in random_list:\n track_id = int(item)\n self.playlist.append(track_id=self.library[track_id].track_id,\n track_name=self.library[track_id].track_name,\n artist=self.library[track_id].artist,\n album=self.library[track_id].album,\n file_location=self.library[track_id].file_location)\n\n # Add a track to the playlist\n # add_track(playlist, add_track_id, add_track_location)\n # Input: playlist, add_track_id, add_track_location\n # Output: modified playlist\n def add_track(self, playlist, add_track_id, add_track_location):\n outer_range = len(playlist)\n # Find the corresponding track info\n if self.library[add_track_id] is not None:\n if add_track_location > outer_range or add_track_location < 1:\n print(\"Warning(901): Add Out of Index\")\n add_loc = outer_range\n else:\n add_loc = add_track_location - 1\n # Add to playlist\n pre_track = playlist.find_nth(n=add_loc)\n playlist.insert_after(pre_track_id=pre_track,\n track_id=add_track_id,\n track_name=self.library[add_track_id].track_name,\n artist=self.library[add_track_id].artist,\n album=self.library[add_track_id].album,\n file_location=self.library[add_track_id].file_location)\n return LinkedList(head=playlist.head)\n else:\n print(\"Warning(902): Item not found in library\")\n return 
LinkedList(head=playlist.head)\n\n # Print the current playlist\n # print(playlist)\n # Input: playlist\n # Output: None\n def print(self, playlist):\n print(\"Printing Current Playlist\")\n playlist.print_list()\n print(\"\")\n\n\n##############################################################\n# Function Prototype\n##############################################################\ndef test_api():\n # Create Library\n lib = Library(file_name=\"raw_track_short.csv\", playlist_length=10)\n # Obtain Playlist\n playlist = lib.playlist\n # Print Playlist\n lib.print(playlist=playlist)\n # Add new song to playlist\n playlist = lib.add_track(playlist=playlist, add_track_id=-1, add_track_location=5) #Cory tried 154, -1 pass, -1, 5 fail\n # Print playlist\n lib.print(playlist)\n\n\n##############################################################\n# Main Function\n##############################################################\ndef main():\n print(\"Hello World!\")\n test_api()\n\n\n##############################################################\n# Main Function Runner\n##############################################################\nif __name__ == \"__main__\":\n main()\n","sub_path":"lab6/lab6_2.py","file_name":"lab6_2.py","file_ext":"py","file_size_in_byte":13684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"188639296","text":"import xml.etree.ElementTree as ET\nimport csv\nfrom graph import drawGraph\n\n# Requiered Define with Tablaeu Workbook should be analyzed\ntablaeuWorkBook = ET.parse('src/2019_12-02 - Cost to Serve (Live) - DH.twb')\n# Requiered Choose the Tab or Datasource from Tablaeu Workbook\ntablaeuSource=\"customer_order_profile (ML Estimated)\"\n\n# Optional Do you wand to see only connceted Fields, when keep True?\nonlyConnectedFileds=True\n\ndef parse():\n root = tablaeuWorkBook.getroot()\n for datasource in root.findall('datasources/datasource'):\n value = datasource.get('caption')\n print (value)\n columnID=0\n columnCollections={}\n all_calculation=[]\n if (value == tablaeuSource):\n for column in datasource.findall(\"column\"):\n columnID=columnID+1\n columnName=column.get(\"name\")\n columnCaption=column.get(\"caption\")\n columnRole=column.get(\"role\")\n columnCollections.update({columnID: columnName})\n dataset=[]\n typeOfColumn=\"field\"\n idCalcColumnAll=[]\n\n \n if column.find(\"calculation\") is not None:\n columnFormula=column.find(\"calculation\").get(\"formula\")\n typeOfColumn=\"calculation\"\n idx_s =[i for i in range(len(columnFormula)) if columnFormula.startswith(\"[\", i)]\n idx_e =[i for i in range(len(columnFormula)) if columnFormula.startswith(\"]\", i)] \n j=0\n\n for pos_start_all in idx_s:\n pos_end_all=idx_e[j]\n j=j+1\n valueAll=columnFormula[pos_start_all:pos_end_all+1]\n idCalcColumnAll.append(valueAll)\n else:\n columnFormula=column.find(\"calculation\")\n \n dataset.append(columnID)\n dataset.append(columnCaption)\n dataset.append(columnName)\n dataset.append(typeOfColumn)\n dataset.append(columnFormula)\n dataset.append(idCalcColumnAll)\n dataset.append(columnRole)\n all_calculation.append(dataset) \n final=[]\n\n #HEAD ROW FOR CSV\n head=[]\n head.append(\"id\")\n head.append(\"caption\")\n head.append(\"name\")\n head.append(\"typeOfColumn_parent_calculations\")\n head.append(\"formula\")\n 
head.append(\"names_of_parent\")\n head.append(\"ids_of_parent\")\n head.append(\"role_of_column\")\n final.append(head)\n \n for calc in all_calculation:\n temp=[]\n temp.append(calc[0])\n temp.append(calc[1])\n temp.append(calc[2])\n temp.append(calc[3])\n temp.append(calc[4])\n temp.append(calc[5])\n \n ids=[]\n\n for element in calc[5]:\n dependencies=[]\n for x, y in columnCollections.items(): \n if y==element:\n dependencies.append(x)\n break\n ids.append(dependencies)\n\n temp.append(ids)\n temp.append(calc[6])\n final.append(temp)\n \n with open(\"output/\"+tablaeuSource+\".csv\", 'w', newline='') as myfile:\n wr = csv.writer(myfile, delimiter=';')\n wr.writerows(final) \n\n\nif __name__== \"__main__\":\n parse()\n drawGraph(tablaeuSource, onlyConnectedFileds)","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"415301496","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n return list(map(int, input().split()))\n\n\ndef read_ints():\n return map(int, input().split())\n\n\nfrom itertools import combinations\n\n\ndef main():\n N = int(input())\n P = read_int_list()\n dp = [[0] * 10010 for _ in range(N + 1)]\n unique = {0}\n for i in range(1, N + 1):\n p = P[i - 1]\n for sum_p in range(10010):\n tmp_p = dp[i - 1][sum_p]\n if sum_p >= p:\n tmp_p = max(tmp_p, dp[i - 1][sum_p - p] + p)\n dp[i][sum_p] = tmp_p\n unique.add(tmp_p)\n print(len(unique))\n\n\nmain()\n","sub_path":"others/tpdc_a.py","file_name":"tpdc_a.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"277034229","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom AnalizadorSistemas.models import *\nfrom django.core import serializers\nfrom django.http import HttpResponse\nfrom AnalizadorSistemas.Mundo import Calculador\nfrom django.template import RequestContext\nimport json\nimport csv\n# Create your views here.\n\ndef login(request):\n\n template = \"login.html\"\n return render(request,template,context_instance=RequestContext(request))\n\n@login_required(redirect_field_name='home')\ndef exportar(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"datos.csv\"'\n writer = csv.writer(response)\n totalSalidas = Salida.objects.all()\n lista = list(totalSalidas)\n matriz = Calculador.generarMatriz(lista)\n print(matriz[0])\n writer.writerow(['Producto', 'Fecha', 'Defectuoso', 'Costo'])\n for i in matriz:\n writer.writerow(i)\n\n\n\n return response\n\n\n\ndef home(request):\n\n #Contenedores de datos\n totalSalidas = Salida.objects.all()\n totalEntradas = Entrada.objects.all()\n\n N = Calculador.obtenerN(totalSalidas)\n #Calculo de Calidad\n totalDatos = Calculador.contatTotal(totalSalidas)\n totalDefectuosos = Calculador.ContarMalos(totalSalidas)\n totalAceptados = Calculador.contarBuenos(totalSalidas)\n #Calculo Tiempo de Ciclo\n tiempoCiclo = Calculador.datosTiempoCiclo(totalEntradas,totalSalidas)\n ##Calculo indicadores\n info = Proceso.objects.all()\n porcentajes= Calculador.obtenerPorcentaje(info)\n disponibilidad = porcentajes[len(porcentajes)-1]\n dispo = str(disponibilidad)\n rendimiento = Calculador.RendimientoPromedio(totalEntradas,totalSalidas,info)\n rendi = float(\"{0:.2f}\".format(rendimiento))\n calidad = Calculador.CalidadPromedio(totalEntradas,totalSalidas)\n calidad = float(\"{0:.2f}\".format(calidad))\n #Calculo Rendimiento\n rendimiento = 
Calculador.datosRendimiento(totalEntradas,totalSalidas,0.03)\n #Costos\n costos = Calculador.datosCostos(totalSalidas)\n\n diccionario = {'malos':totalDefectuosos,'buenos':totalAceptados,\n 'totalDatos':totalDatos,'N':N,'ciclo':tiempoCiclo,\n 'calidad':str(calidad), 'dispo':dispo, 'rendimientoG':str(rendi), 'rendimientoU':rendimiento,\n 'costos':costos}\n\n\n template = \"index.html\"\n return render(request, template, diccionario, context_instance=RequestContext(request))\n\ndef actualizar(request):\n\n\n #Contenedores de datos\n totalSalidas = Salida.objects.all()\n totalEntradas = Entrada.objects.all()\n N = Calculador.obtenerN(totalSalidas)\n #Calculo de Calidad\n totalDatos = Calculador.contatTotal(totalSalidas)\n totalDefectuosos = Calculador.ContarMalos(totalSalidas)\n totalAceptados = Calculador.contarBuenos(totalSalidas)\n #Calculo Tiempo de Ciclo\n tiempoCiclo = Calculador.datosTiempoCiclo(totalEntradas,totalSalidas)\n ##Calculo indicadores\n info = Proceso.objects.all()\n porcentajes= Calculador.obtenerPorcentaje(info)\n disponibilidad = porcentajes[len(porcentajes)-1]\n dispo = str(disponibilidad)\n rendimiento = Calculador.RendimientoPromedio(totalEntradas,totalSalidas,info)\n rendi = float(\"{0:.2f}\".format(rendimiento))\n calidad = Calculador.CalidadPromedio(totalEntradas,totalSalidas)\n calidad = float(\"{0:.2f}\".format(calidad))\n #Calculo Rendimiento\n rendimiento = Calculador.datosRendimiento(totalEntradas,totalSalidas,0.03)\n #Costos\n costos = Calculador.datosCostos(totalSalidas)\n\n\n\n\n datos = {'malos':totalDefectuosos,'buenos':totalAceptados,\n 'totalDatos':totalDatos,'N':N,'ciclo':tiempoCiclo,\n 'calidad':str(calidad), 'dispo':dispo, 'rendimientoG':str(rendi), 'rendimientoU':rendimiento,\n 'costos':costos}\n\n\n\n\n salida = json.dumps(datos)\n return HttpResponse(salida, content_type='application/json')\n\ndef variables(request):\n template = \"variables.html\"\n return 
render(request,template,context_instance=RequestContext(request))\n\ndef subir(request):\n # if this is a POST request we need to process the form data\n vInstalaciones = request.POST.get(\"instalaciones\")\n numOperario = request.POST.get(\"operarios\")\n turnosTrabajo = request.POST.get(\"turnosTrabajo\")\n anosMaquina = request.POST.get(\"anosAdquisicionMaquina\")\n porcentajeSeguro = request.POST.get(\"porcentajeSeguro\")\n valorKilowatts = request.POST.get(\"valorKiloWatts\")\n costoHerramienta = request.POST.get(\"costoHerramienta\")\n vidaMaquina = request.POST.get(\"vidaMaquina\")\n presMensual = request.POST.get(\"presupuestoMensual\")\n costoServicios = request.POST.get(\"CostoServicios\")\n porcentajeDisponibilidad = request.POST.get(\"porcDisponibilidad\")\n mantenimieno = request.POST.get(\"mantenimiento\")\n tiempoPlaneado = request.POST.get(\"tiempoPlaneado\")\n estandarCiclo = request.POST.get(\"estandarCiclo\")\n\n proceso = Proceso(tiempoPlaneado=tiempoPlaneado, porcDisponibilidad= porcentajeDisponibilidad, valorInstalacion = vInstalaciones,\n numOperarios=numOperario, turnoTrabajo= turnosTrabajo, porcentajeSeguro = porcentajeSeguro,\n valorKilowatts = valorKilowatts, presupuestoMensual = presMensual, costoServicios= costoServicios,\n costoHerramienta = costoHerramienta, vidaMaquina = vidaMaquina, estandarCiclo=estandarCiclo,\n anosMaquina = anosMaquina, mantenimieno=mantenimieno)\n\n\n proceso.save()\n\n\n return HttpResponseRedirect(\"/\")\n\n\n\n\n\n","sub_path":"PDG GitHub/AnalizadorSistemas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"485846845","text":"'''\nCreated on May 7, 2014\n\n@author: jeffy\n'''\nimport os\ndef rename(dir):\n files = os.listdir(dir)\n os.chdir(dir)\n count = 0\n for f in files:\n if f.endswith('.pdf'):\n count += 1\n os.rename(f, 'KDD'+str(count)+'.pdf')\n \nif __name__ == '__main__':\n rename('../../data/KDD11')","sub_path":"src/lib/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"650890228","text":"from turtle import *\nfrom time import sleep\n\n\nlives = 3\ncanvas = getcanvas()\nregister_shape('panda_2.gif')\n\nclass Panda(Turtle):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.pu()\n\t\tself.size = 20\n\t\tself.dx = 4.5\n\t\tself.dy = 4.5\n\t\tcanvas = getcanvas()\n\t\tself.shape('panda_2.gif')\n\n\tdef move(self):\n\t\tself.goto(self.xcor() + self.dx, self.ycor() + self.dy)\n\n\tdef check_tile_collision(self, list):\n\t\tfor entity in list:\n\t\t\tif (self.xcor() + self.size > entity.xcor() - entity.width and self.xcor() - self.size < entity.xcor() + entity.width) and\\\n\t\t\t(self.ycor() + self.size > entity.ycor() - entity.height and self.ycor() - self.size < entity.ycor() + entity.height):\n\t\t\t\tif entity.ycor() - entity.height < self.ycor() < entity.ycor() + entity.height:\n\t\t\t\t\tself.dx *= -1\n\t\t\t\tif entity.xcor() - entity.width < self.xcor() < entity.xcor() + entity.width:\n\t\t\t\t\tself.dy *= -1\n\t\t\t\tentity.hp -= 1\n\t\t\t\tif entity.hp == 0:\n\t\t\t\t\tentity.goto(500,0)\n\n\t\t\t\telse:\n\t\t\t\t\tentity.draw_self()\n\n\t\t\t\tif self.dx > 0:\n\t\t\t\t\tself.dx += 0.05\n\t\t\t\telse:\n\t\t\t\t\tself.dx -= 0.05\n\n\t\t\t\tif self.dy > 0:\n\t\t\t\t\tself.dy += 0.05\n\t\t\t\telse:\n\t\t\t\t\tself.dy -= 0.05\n\t\t\t\t#replace with math stuff\n\n\tdef check_player_collision(self, player):\n\t\tif self.xcor() + self.size > player.xcor() and self.xcor() - self.size < player.xcor() + player.width and\\\n\t\tself.ycor() + self.size > player.ycor() - player.height and self.ycor() - self.size < player.ycor() + player.height:\n\t\t\tself.dy *= -1\n\t\t\tself.dx = abs(self.dx)\n\n\t\telif self.xcor() - self.size < player.xcor() and self.xcor() + self.size > player.xcor() - player.width and\\\n\t\tself.ycor() + self.size > player.ycor() - player.height and self.ycor() - self.size < player.ycor() + player.height:\n\t\t\tself.dy *= -1\n\t\t\tself.dx = -abs(self.dx)\n\n\n\tdef 
check_edge_collision(self):\n\t\tglobal lives, heart_stamps, hearts\n\t\tif self.xcor() >= canvas.winfo_width()/2 - self.size or self.xcor() <= -canvas.winfo_width()/2 + self.size:\n\t\t\tself.dx *= -1\n\n\t\tif self.ycor() >= canvas.winfo_height()/2 - self.size:\n\t\t\tself.dy *= -1\n\n\t\telif self.ycor() - self.size <= -canvas.winfo_height()/2 + 10:\n\t\t\tlives -= 1\n\t\t\tif lives == 0:\n\t\t\t\treturn True\n\t\t\tself.goto(0,0)\n\t\t\tgetscreen().update()\n\t\t\tsleep(1.5)\n\t\t\treturn 'reduce'\n\t\telse:\n\t\t\treturn False","sub_path":"Breakout/Panda.py","file_name":"Panda.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"503765720","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\n#from .validators import validate_file_extension\n\ndef validate_file_extension(value):\n import os\n from django.core.exceptions import ValidationError\n ext = os.path.splitext(value.name)[1] # [0] returns path+filename\n valid_extensions = ['.fasta', '.fas', '.fa', '.fastq', '.fq']\n if not ext.lower() in valid_extensions:\n raise ValidationError(u'Unsupported file extension.')\n\nclass DocumentForm(forms.Form):\n docfile = forms.FileField(\n label='Select a file',\n validators=[validate_file_extension]\n )\n","sub_path":"myproject/myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"313534825","text":"from django.test import SimpleTestCase\nfrom testfixtures import TempDirectory, LogCapture\n\nfrom logging import INFO, ERROR, DEBUG\nfrom unittest import mock\nimport os\n\nfrom hardware.Utils.logger import Logger\n\n\nclass LoggerTests(SimpleTestCase):\n def setUp(self):\n self.temp_dir = TempDirectory()\n\n def tearDown(self):\n self.temp_dir.cleanup()\n\n def test_create_logger_with_dir(self):\n \"\"\"\n Simple test for creating a logger\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\")\n\n self.assertTrue(logger.name == \"test_logger\")\n self.assertTrue(\n logger.format == \"%(asctime)s | %(levelname)s | %(message)s\"\n )\n self.assertTrue(logger.level is INFO)\n\n def test_create_logger_with_level(self):\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\", level=ERROR)\n\n self.assertTrue(logger.name == \"test_logger\")\n self.assertTrue(\n logger.format == \"%(asctime)s | %(levelname)s | %(message)s\"\n )\n self.assertTrue(logger.level is ERROR)\n\n @mock.patch.object(os, \"makedirs\")\n @mock.patch(\"os.path.exists\")\n def test_makedir_if_not_exist(self, path_mock, dir_mock):\n \"\"\"\n insures that the function os.makedir is called if the supplied directory\n doesn't exist\n \"\"\"\n path_mock.return_value = False\n dir_mock.return_value = self.temp_dir.path\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n Logger(name=\"test_logger\", filename=\"logger.txt\")\n\n dir_mock.assert_called()\n dir_mock.assert_called_with(self.temp_dir.path)\n\n def test_info_message(self):\n \"\"\"\n Tests the .info method\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n with LogCapture() as capture:\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\")\n logger.info(\"test 
message\")\n\n capture.check((\"test_logger\", \"INFO\", \"test message\"))\n\n @mock.patch(\"builtins.print\")\n def test_info_message_with_print(self, mock_print=mock.MagicMock()):\n \"\"\"\n Tests the .info method\n \"\"\"\n with mock.patch.dict(\n os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path, \"SHOW_LOGS\": \"True\"}\n ):\n with LogCapture() as capture:\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\")\n logger.info(\"test message\")\n\n self.assertTrue(mock_print.mock_calls == [mock.call(\"test message\")])\n\n capture.check((\"test_logger\", \"INFO\", \"test message\"))\n\n def test_message_failure(self):\n \"\"\"\n makes sure that nothing is logged during initialization\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n with LogCapture() as capture:\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\") # noqa: F841\n capture.check() # expect no output\n\n def test_error_message(self):\n \"\"\"\n Tests the .error method\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n with LogCapture() as capture:\n logger = Logger(name=\"test_logger\", filename=\"logger.txt\")\n logger.error(\"test message\")\n\n capture.check((\"test_logger\", \"ERROR\", \"test message\"))\n\n def test_debug_message(self):\n \"\"\"\n Tests the .debug method\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n with LogCapture() as capture:\n mylogger = Logger(\n name=\"test_logger\", filename=\"logger.txt\", level=DEBUG\n )\n mylogger.debug(\"test message\")\n\n capture.check((\"test_logger\", \"DEBUG\", \"test message\"))\n\n def test_warn_message(self):\n \"\"\"\n Tests the .warn method\n \"\"\"\n with mock.patch.dict(os.environ, {\"LOG_DIRECTORY\": self.temp_dir.path}):\n with LogCapture() as capture:\n mylogger = Logger(name=\"test_logger\", filename=\"logger.txt\")\n mylogger.warn(\"test message\")\n\n capture.check((\"test_logger\", \"WARNING\", 
\"test message\"))\n","sub_path":"hardware/tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"208175352","text":"from os.path import join, abspath\nimport subprocess\n\nfrom my_utils.python_utils.general import get_arg_string\nfrom my_utils.tensorflow_utils.training import set_GPUs\nfrom global_settings import PYTHON_EXE, RESULTS_DIR, PROCESSED_DATA_DIR\n\n\ndataset = \"svhn\"\ndata_dir = join(PROCESSED_DATA_DIR, \"ComputerVision\", dataset.upper(), \"bytes\")\ntrain_file = join(data_dir, \"train.npz\")\ntest_file = join(data_dir, \"test.npz\")\n\noutput_dir = abspath(join(RESULTS_DIR, \"semi_supervised\", \"{}\".format(dataset), \"ICT_VD\"))\n\n\n# Default settings\n# =============================== #\nDEFAULT_CONFIG_500 = {\n \"num_labeled\": 500,\n \"input_norm\": \"standard\",\n \"flip_horizontally\": False,\n \"translating_pixels\": 2,\n\n \"batch_size\": 100,\n \"batch_size_labeled\": 25,\n\n \"epochs\": 1000,\n \"steps\": 280000,\n \"rampup_len_step\": 10000,\n \"rampdown_len_step\": 80000,\n\n \"lr_max\": 0.1,\n \"lr_momentum\": 0.9,\n \"weight_decay\": 0.0001,\n \"weight_norm\": False,\n \"gauss_noise\": True,\n\n \"ema_momentum_init\": 0.99,\n \"ema_momentum_final\": 0.99,\n\n \"alpha\": 1.0,\n \"cross_ent_l\": 1.0,\n \"cent_u_coeff_max\": 0.0,\n \"cons_coeff_max\": 10.0,\n \"weight_kld_coeff_max\": 0.05,\n\n \"cons_mode\": \"mse\",\n\n # It is important that this one is False when #labeled is small\n # Which means we also apply consistency loss on labeled data\n \"cons_4_unlabeled_only\": False,\n\n # We should not mask weights\n \"mask_weights\": False,\n\n # Setting this one to either True or False does not change the results\n # However, when \"cons_against_mean\" = False, we should set it to True\n \"ema_4_log_sigma2\": True,\n \"cons_against_mean\": True,\n}\n\n\nDEFAULT_CONFIG_250 = DEFAULT_CONFIG_500\nDEFAULT_CONFIG_250['num_labeled'] = 250\n\n\nDEFAULT_CONFIG_1000 = DEFAULT_CONFIG_500\nDEFAULT_CONFIG_1000['num_labeled'] = 1000\n# =============================== #\n\n\n# Run settings\n# =============================== 
#\nrun_config = {\n \"output_dir\": output_dir,\n \"dataset\": dataset,\n \"train_file\": train_file,\n \"test_file\": test_file,\n\n\n # 9310gaurav\n # ------------------------ #\n \"model_name\": \"9310gaurav\",\n\n # 250\n # ------------------------ #\n # \"run\": \"0_ICTVD_L250_Nesterov_alpha1.0\",\n # \"run\": \"0a_ICTVD_L250_Nesterov_alpha1.0\",\n # \"run\": \"0b_ICTVD_L250_Nesterov_alpha1.0\",\n # \"num_labeled\": 250,\n # \"alpha\": 1.0,\n # \"cent_u_coeff_max\": 0.0,\n # ------------------------ #\n\n # 500\n # ------------------------ #\n # \"run\": \"100_ICTVD_L500_Nesterov_alpha1.0\",\n # \"run\": \"100a_ICTVD_L500_Nesterov_alpha1.0\",\n # \"run\": \"100b_ICTVD_L500_Nesterov_alpha1.0\",\n # \"num_labeled\": 500,\n # \"alpha\": 1.0,\n # \"cent_u_coeff_max\": 0.0,\n # ------------------------ #\n\n # 1000\n # ------------------------ #\n # \"run\": \"200_ICTVD_L1000_Nesterov_alpha1.0\",\n # \"run\": \"200a_ICTVD_L1000_Nesterov_alpha1.0\",\n # \"run\": \"200b_ICTVD_L1000_Nesterov_alpha1.0\",\n # \"num_labeled\": 1000,\n # \"alpha\": 1.0,\n # \"cent_u_coeff_max\": 0.0,\n # ------------------------ #\n\n \"force_rm_dir\": True,\n}\n# =============================== #\n\nif run_config['num_labeled'] == 250:\n config = DEFAULT_CONFIG_250\nelif run_config['num_labeled'] == 500:\n config = DEFAULT_CONFIG_500\nelif run_config['num_labeled'] == 1000:\n config = DEFAULT_CONFIG_1000\nelse:\n raise ValueError(\"num_labeled={}\".format(run_config['num_labeled']))\n\nconfig.update(run_config)\narg_str = get_arg_string(config)\nset_GPUs([0])\n\nprint(\"Running arguments: [{}]\".format(arg_str))\nrun_command = \"{} ./train.py {}\".format(PYTHON_EXE, arg_str).strip()\nsubprocess.call(run_command, shell=True)","sub_path":"working/semi_supervised/ICT_VD/run_svhn.py","file_name":"run_svhn.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"574149157","text":"#!/usr/bin/python2\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport simplejson as json\nimport random\nimport thread\nimport numpy as np\nimport argparse\n\nargs_parser = argparse.ArgumentParser(description='Simple application suggesting tweets with the help of Machine Learning algorithms')\nargs_parser.add_argument('pseudo', metavar='', type=str,\n help='Twitter user\\'s name (without the @)')\nargs = args_parser.parse_args()\n\nfrom flask import Flask, render_template, request, redirect, url_for, jsonify\napp = Flask(__name__)\n\nimport stream_categorie as sg\nfrom stream_categorie import MyFilteredStream as FStream\n\nuser = args.pseudo\nmax_tweets = 30 # Arbitrary maximum number of tweets\n\ncategories = []\ntweets=[]\n\nNocat=\"Halloween\" # Joker category\nnp.set_printoptions(formatter={'float': '{: 0.2f}'.format})\nstream = FStream()\nstream.stream()\n\n\"\"\"\nSimply show the index page\n:param username: The user's name (simply esthetic for the moment)\n\"\"\"\n@app.route('/')\n@app.route('/')\ndef index(username=user):\n return render_template('index.html', username=username)\n\n\"\"\"\nChange the user (useless for the moment)\n:param request.form['username']: New user's name\n\"\"\"\n@app.route('/switch', methods=['POST', 'GET'])\ndef switchUser():\n user = request.form['username']\n return redirect(url_for('index', username=user))\n\n\"\"\"\nUpdate the list of the categories\n:param json: True if a return is required\n:return: Returns the list of the categories (JSON format)\n\"\"\"\n@app.route('/update_categories', methods=['POST', 'GET'])\ndef update_categories(json=True):\n global categories, stream\n new_categories = stream.get_new_categories()\n\n for c in new_categories:\n categories += [{ \"color\": \"#%06x\" % random.randint(0, 0xFFFFFF),\n \"id\": len(categories),\n \"name\": c}]\n\n if json:\n return jsonify(result=categories)\n\n\"\"\"\nUpdate the list of the tweets\n:param json: True if a 
return is required\n:return: Returns the list of the tweets (JSON format)\n\"\"\"\n@app.route('/update_tweets', methods=['POST', 'GET'])\ndef update_tweets(json=True):\n global tweets, max_tweets, stream, categories\n\n for nt in stream.get_corpus():\n # Just in case the tweet's category has been removed for a mysterious reason\n has_cat = False\n\n for c in categories:\n if c[\"name\"] == nt[\"cat\"]:\n nt[\"cat\"] = c\n has_cat = True\n\n if has_cat:\n tweets += [nt]\n\n if len(tweets) > max_tweets:\n tweets.pop(0)\n\n stream.clear_corpus()\n\n print(tweets)\n\n if json:\n return jsonify(result=tweets)\n\n\"\"\"\nSimply launche the app in a new thread\nSet debug to True to activate the powerful Flask's debugger\n\"\"\"\nif __name__ == '__main__':\n # thread.start_new_thread(app.run(), debug=True)\n thread.start_new_thread(app.run(), debug=False)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"4358175","text":"def modify_fs(module, blade):\n 'Modify Filesystem'\n changed = True\n if (not module.check_mode):\n mod_fs = False\n nfsv3 = (module.params['nfs'] if (module.params['nfsv3'] is None) else module.params['nfsv3'])\n attr = {\n \n }\n if module.params['user_quota']:\n user_quota = human_to_bytes(module.params['user_quota'])\n if module.params['group_quota']:\n group_quota = human_to_bytes(module.params['group_quota'])\n fsys = get_fs(module, blade)\n if fsys.destroyed:\n attr['destroyed'] = False\n mod_fs = True\n if module.params['size']:\n if (human_to_bytes(module.params['size']) != fsys.provisioned):\n attr['provisioned'] = human_to_bytes(module.params['size'])\n mod_fs = True\n api_version = blade.api_version.list_versions().versions\n if (NFSV4_API_VERSION in api_version):\n if (nfsv3 and (not fsys.nfs.v3_enabled)):\n attr['nfs'] = NfsRule(v3_enabled=nfsv3)\n mod_fs = True\n if ((not nfsv3) and fsys.nfs.v3_enabled):\n attr['nfs'] = NfsRule(v3_enabled=nfsv3)\n mod_fs = True\n if (module.params['nfsv4'] and (not fsys.nfs.v4_1_enabled)):\n attr['nfs'] = NfsRule(v4_1_enabled=module.params['nfsv4'])\n mod_fs = True\n if ((not module.params['nfsv4']) and fsys.nfs.v4_1_enabled):\n attr['nfs'] = NfsRule(v4_1_enabled=module.params['nfsv4'])\n mod_fs = True\n if (nfsv3 or (module.params['nfsv4'] and fsys.nfs.v3_enabled) or fsys.nfs.v4_1_enabled):\n if (fsys.nfs.rules != module.params['nfs_rules']):\n attr['nfs'] = NfsRule(rules=module.params['nfs_rules'])\n mod_fs = True\n if (module.params['user_quota'] and (user_quota != fsys.default_user_quota)):\n attr['default_user_quota'] = user_quota\n mod_fs = True\n if (module.params['group_quota'] and (group_quota != fsys.default_group_quota)):\n attr['default_group_quota'] = group_quota\n mod_fs = True\n else:\n if (nfsv3 and (not fsys.nfs.enabled)):\n attr['nfs'] = NfsRule(enabled=nfsv3)\n mod_fs = True\n if ((not nfsv3) and fsys.nfs.enabled):\n attr['nfs'] = NfsRule(enabled=nfsv3)\n mod_fs = 
True\n if (nfsv3 and fsys.nfs.enabled):\n if (fsys.nfs.rules != module.params['nfs_rules']):\n attr['nfs'] = NfsRule(rules=module.params['nfs_rules'])\n mod_fs = True\n if (module.params['smb'] and (not fsys.smb.enabled)):\n attr['smb'] = ProtocolRule(enabled=module.params['smb'])\n mod_fs = True\n if ((not module.params['smb']) and fsys.smb.enabled):\n attr['smb'] = ProtocolRule(enabled=module.params['smb'])\n mod_fs = True\n if (module.params['http'] and (not fsys.http.enabled)):\n attr['http'] = ProtocolRule(enabled=module.params['http'])\n mod_fs = True\n if ((not module.params['http']) and fsys.http.enabled):\n attr['http'] = ProtocolRule(enabled=module.params['http'])\n mod_fs = True\n if (module.params['snapshot'] and (not fsys.snapshot_directory_enabled)):\n attr['snapshot_directory_enabled'] = module.params['snapshot']\n mod_fs = True\n if ((not module.params['snapshot']) and fsys.snapshot_directory_enabled):\n attr['snapshot_directory_enabled'] = module.params['snapshot']\n mod_fs = True\n if (module.params['fastremove'] and (not fsys.fast_remove_directory_enabled)):\n attr['fast_remove_directory_enabled'] = module.params['fastremove']\n mod_fs = True\n if ((not module.params['fastremove']) and fsys.fast_remove_directory_enabled):\n attr['fast_remove_directory_enabled'] = module.params['fastremove']\n mod_fs = True\n api_version = blade.api_version.list_versions().versions\n if (HARD_LIMIT_API_VERSION in api_version):\n if ((not module.params['hard_limit']) and fsys.hard_limit_enabled):\n attr['hard_limit_enabled'] = module.params['hard_limit']\n mod_fs = True\n if (module.params['hard_limit'] and (not fsys.hard_limit_enabled)):\n attr['hard_limit_enabled'] = module.params['hard_limit']\n mod_fs = True\n if mod_fs:\n n_attr = FileSystem(**attr)\n try:\n blade.file_systems.update_file_systems(name=module.params['name'], attributes=n_attr)\n except Exception:\n module.fail_json(msg='Failed to update filesystem {0}.'.format(module.params['name']))\n else:\n 
changed = False\n module.exit_json(changed=changed)","sub_path":"Data Set/bug-fixing-3/1b45abc4f3fd736414d819601126accc4672a80c--fix.py","file_name":"1b45abc4f3fd736414d819601126accc4672a80c--fix.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"270058354","text":"import json\nimport corenlp\nif __name__ == '__main__':\n with open('./nlp.txt', 'r') as f:\n text = f.read()\n corenlp_dir = \"/usr/local/lib/stanford-corenlp-full-2013-06-20/\"\n parser = corenlp.StanfordCoreNLP(corenlp_path=corenlp_dir)\n # 一度に処理すると、途中で処理が切れる\n for line in text.split('\\n'):\n result = json.loads(parser.parse(line))\n for sentence_data in result['sentences']:\n for word_data in sentence_data['words']:\n print(word_data[0])\n","sub_path":"53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"328351509","text":"#!/usr/bin/env python\n\nimport sys, os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport scipy\nimport scipy.interpolate\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 6:\n sys.stderr.write(\"Usage: %s <# of Files> <# Nodes in one direction> \\n\")%(sys.argv[0])\n sys.exit()\n\nnFiles = int(sys.argv[1])\nN = int(sys.argv[2])\ntitle = sys.argv[3]\nnLevels = sys.argv[4]\nextension = sys.argv[5]\n\nnNodes = N * N\n\nxMin = -100\nxMax = 100\nyMin = -100\nyMax = 100\n\nnDensity = [];\nx = [];\ny = [];\ntime = [];\n\nk = 0\nwhile k <= nFiles:\n f = open(str(k)+'-'+str(extension)+'.dat','r')\n lines = f.readlines()\n f.close()\n \n i=0\n for line in lines:\n x.append(float(line.split()[1]))\n y.append(float(line.split()[2]))\n time.append(k)\n i += 1\n k += 499\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.scatter(x,y, c=time, cmap='seismic')\nxlim = plt.xlim([xMin,xMax])\nylim = plt.ylim([yMin,yMax])\n\nplt.xlabel('Angstroms', fontsize=15)\nplt.ylabel('Angstroms', fontsize=15)\nplt.title(title, fontsize=15)\n\nplt.savefig(title+'.png')\nplt.savefig(title+'.eps', format='eps', dpi=1200)\n\n","sub_path":"002-plot/pointByPoint-plot_membrane-saturation.py","file_name":"pointByPoint-plot_membrane-saturation.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"114458962","text":"#!/usr/bin/env python\n#coding: utf-8\n\"\"\"\nConverter module. \nThis is for the moment empty (populated only with almost pass through anonymous functions)\nbut aims to be populated with more sofisticated translators ... \n\n\"\"\"\n# get ready for python3\nfrom __future__ import with_statement, print_function\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"jerome.kieffer@esrf.eu\"\n__license__ = \"GPLv3+\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n\nimport types, logging\nlogger = logging.getLogger(\"converter\")\n\ndef convert_data_integer(data):\n \"\"\"\n convert data to integer\n \"\"\"\n if data is not None:\n return data.astype(int)\n else:\n return data\n\n\nCONVERSION_HEADER = {\n (\"edfimage\", \"edfimage\"): lambda header:header,\n }\nCONVERSION_DATA = {\n (\"edfimage\", \"edfimage\"): lambda data:data,\n (\"edfimage\", \"cbfimage\"): convert_data_integer,\n (\"edfimage\", \"mar345image\"): convert_data_integer,\n (\"edfimage\", \"fit2dmaskimage\"): convert_data_integer,\n (\"edfimage\", \"kcdimage\"): convert_data_integer,\n (\"edfimage\", \"OXDimage\"): convert_data_integer,\n (\"edfimage\", \"pnmimage\"): convert_data_integer,\n }\n\ndef convert_data(inp, outp, data):\n \"\"\"\n Return data converted to the output format ... 
over-simplistic implementation for the moment ...\n @param inp,outp: input/output format like \"cbfimage\"\n @param data(ndarray): the actual dataset to be transformed\n \"\"\"\n return CONVERSION_DATA.get((inp, outp), lambda data:data)(data)\n\ndef convert_header(inp, outp, header):\n \"\"\"\n return header converted to the output format\n @param inp,outp: input/output format like \"cbfimage\"\n @param header(dict):the actual set of headers to be transformed \n \"\"\"\n return CONVERSION_HEADER.get((inp, outp), lambda header:header)(header)\n","sub_path":"fabio-src/converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"96448927","text":"\"\"\" 9. Write a binary search function. It should take a sorted sequence and\nthe item it is looking for. It should return the index of the item if found.\nIt should return -1 if the item is not found.\"\"\"\n\nprint(\"Question 9\")\n\nposition = 0\nlist = [2,4,5,8,10,15,18,25,28,35,48,50]\n\n\ndef binary_search(list, n):\n\n lower_bound = 0\n upper_bound = len(list) - 1\n\n while lower_bound <= upper_bound:\n mid = (lower_bound + upper_bound) // 2\n\n if list[mid] == n:\n globals()['position'] = mid\n return True\n else:\n if list[mid] < n:\n lower_bound = mid + 1\n else:\n upper_bound = mid - 1\n\n return False\n\n\nif binary_search(list,50):\n print(\"Found at: \", position)\nelse:\n print(\"Not Found\")\n","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"320885685","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.admin_dashboard, name=\"admin_dashboard\"),\n path('register-user', views.register_user_admin, name=\"register_user\"),\n path('show-user', views.get_user, name=\"show_user\"),\n path('update-user-to-admin//', views.update_service, name=\"updateservice\"),\n path('deleteservice//', views.delete_service, name=\"deleteservice\"),\n\n path('servicemen-order/', views.ServicemenOrderListView, name=\"admin_servicemen_order\"),\n path('servicemen-order-detail//', views.ServicemenOrderDetailView.as_view(), name=\"admin_servicemen_order_detail\"),\n path('servicemen-order-status//', views.ServicemenOrderStatuChangeView.as_view(), name=\"admin_servicemen_orderstatus_change\"),\n path('servicemen-neworder/', views.ServicemenNewOrderListView, name=\"admin_servicemen_neworder\"),\n path('servicemens/', views.ServicemenView.as_view(), name=\"admin_view_servicemen\"),\n\n]","sub_path":"admins/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"483929162","text":"import numpy as np\n\nfrom qgis.PyQt.QtCore import (Qt)\nfrom qgis.PyQt.QtGui import (QKeyEvent)\nfrom qgis.PyQt.QtWidgets import (QWidget)\nfrom qgis.core import (QgsWkbTypes, QgsGeometry, QgsPointLocator, Qgis)\nfrom qgis.gui import (QgisInterface, QgsMapToolAdvancedDigitizing, QgsMapMouseEvent,\n QgsSnapIndicator)\n\nfrom los_tools.processing.tools.util_functions import get_max_decimal_numbers, round_all_values\nfrom .los_without_target_widget import LoSNoTargetInputWidget\n\n\nclass LosNoTargetMapTool(QgsMapToolAdvancedDigitizing):\n\n def __init__(self, iface: QgisInterface) -> None:\n super().__init__(iface.mapCanvas(), iface.cadDockWidget())\n self._iface = iface\n self._canvas = self._iface.mapCanvas()\n\n self._point = None\n\n self._snapper = self._canvas.snappingUtils()\n self.snap_marker = QgsSnapIndicator(self._canvas)\n\n self._los_rubber_band = self.createRubberBand(QgsWkbTypes.LineGeometry)\n\n self._widget: QWidget = None\n\n def create_widget(self):\n self.delete_widget()\n\n self._widget = LoSNoTargetInputWidget()\n self._iface.addUserInputWidget(self._widget)\n self._widget.setFocus(Qt.TabFocusReason)\n\n self._widget.valuesChanged.connect(self.draw_los)\n\n def delete_widget(self):\n if self._widget:\n self._widget.releaseKeyboard()\n self._widget.deleteLater()\n self._widget = None\n\n def activate(self) -> None:\n super(LosNoTargetMapTool, self).activate()\n self.create_widget()\n self.messageDiscarded.emit()\n self._canvas = self._iface.mapCanvas()\n self._snapper = self._canvas.snappingUtils()\n if self._canvas.mapSettings().destinationCrs().isGeographic():\n self.messageEmitted.emit(\n \"Tool only works if canvas is in projected CRS. 
Currently canvas is in geographic CRS.\",\n Qgis.Critical)\n self.deactivate()\n return\n self._widget.setUnit(self._canvas.mapSettings().destinationCrs().mapUnits())\n\n def clean(self) -> None:\n self.snap_marker.setVisible(False)\n self._los_rubber_band.hide()\n\n def deactivate(self) -> None:\n self.clean()\n self.delete_widget()\n self._iface.mapCanvas().unsetMapTool(self)\n super(LosNoTargetMapTool, self).deactivate()\n\n def canvasReleaseEvent(self, e: QgsMapMouseEvent) -> None:\n if e.button() == Qt.RightButton:\n self.deactivate()\n elif e.button() == Qt.LeftButton:\n if self._snap_point:\n self._point = self._snap_point\n else:\n self._point = e.mapPoint()\n self.draw_los()\n\n def canvasMoveEvent(self, event: QgsMapMouseEvent) -> None:\n result = self._snapper.snapToMap(event.pos())\n self.snap_marker.setMatch(result)\n if result.type() == QgsPointLocator.Vertex:\n self._snap_point = result.point()\n else:\n self._snap_point = None\n\n def keyPressEvent(self, e: QKeyEvent) -> None:\n if e.key() == Qt.Key_Escape:\n self.deactivate()\n self._iface.mapCanvas().unsetMapTool(self)\n return super().keyPressEvent(e)\n\n def draw_los(self):\n\n canvas_crs = self._canvas.mapSettings().destinationCrs()\n\n if canvas_crs.isGeographic():\n self._iface.messageBar().pushMessage(\n \"LoS can be drawn only for projected CRS. 
Canvas is currently in geographic CRS.\",\n Qgis.Critical,\n duration=5)\n return\n\n if self._point:\n self._los_rubber_band.hide()\n self._los_rubber_band.setToGeometry(QgsGeometry(),\n self._canvas.mapSettings().destinationCrs())\n angles = np.arange(self._widget.min_angle,\n self._widget.max_angle + 0.000000001 * self._widget.angle_step,\n step=self._widget.angle_step).tolist()\n round_digits = get_max_decimal_numbers(\n [self._widget.min_angle, self._widget.max_angle, self._widget.angle_step])\n angles = round_all_values(angles, round_digits)\n size_constant = 1\n for angle in angles:\n new_point = self._point.project(size_constant, angle)\n geom = QgsGeometry.fromPolylineXY([self._point, new_point])\n geom = geom.extendLine(0, self._widget.length - size_constant)\n self._los_rubber_band.addGeometry(geom,\n self._canvas.mapSettings().destinationCrs())\n self._los_rubber_band.show()\n","sub_path":"los_tools/gui/los_without_target_visualization/los_without_target.py","file_name":"los_without_target.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"537633421","text":"import numpy as np\nfrom scipy.stats import truncnorm\nfrom scipy.stats import poisson\nfrom scipy.stats import norm\nimport random\nimport math\n\n\nclass sSInstance:\n n = 8\n capacity = 125\n ch = 1.0\n cp = 10.0\n co = 10.0\n cr = 0\n init_inv = 0\n max_demand = 60\n prob = []\n max_inv_level = n * max_demand\n min_inv_level = -n * max_demand\n rev_time = []\n means = []\n cv = 1/3.0\n\n def max_inv_bouding(self, threshold):\n demand = [0 for _ in range(self.n * self.max_demand + 1)]\n for i in range(self.max_demand + 1):\n demand[i] = self.prob[0][i]\n for i in range(1, self.n):\n temp = [0 for _ in range(self.n * self.max_demand + 1)]\n for a in range((self.n - 1) * self.max_demand + 1):\n for b in range(self.max_demand + 1):\n # print(\"a = \" + str(a) + \" b = \" + str(b) + \" r = \" + str(r))\n temp[a + b] += self.prob[i][b] * demand[a]\n demand = temp\n j = self.n * self.max_demand\n pdf_dem = demand[j]\n while pdf_dem < threshold:\n j -= 1\n pdf_dem += demand[j - 1]\n\n self.max_inv_level = j\n self.min_inv_level = -j + self.init_inv\n\n\n\n # these functions generate different demands probabilities\n def gen_poisson_probability(self, mu):\n self.prob = [[0.0 for _ in range(0, self.max_demand +1)] for _ in range(self.n)]\n for i in range(self.n):\n for j in range(0, self.max_demand +1):\n self.prob[i][j] = poisson.pmf(j, mu)\n\n def gen_fix_probability(self, avg):\n self.prob = [[0.0 for _ in range(0, self.max_demand +1)] for _ in range(self.n)]\n if not (2 <= avg <= self.max_demand - 2):\n print(\"average outside interval allowed\")\n return\n for i in range(self.n):\n self.prob[i][avg-2] = 0.1\n self.prob[i][avg-1] = 0.2\n self.prob[i][avg] = 0.4\n self.prob[i][avg+1] = 0.2\n self.prob[i][avg+2] = 0.1\n\n def gen_bin_probability(self):\n if self.max_demand != 1:\n print(\"Error - In binary demand max demand has to be equal to 1\")\n self.prob = [[0.5, 0.5]for _ in range(self.n)]\n\n def 
gen_non_stationary_normal_demand(self, threshold):\n if len(self.means) != self.n:\n print(\"wrong size self.means vector\")\n return\n self.max_demand = int(norm(loc = max(self.means), scale = self.cv*max(self.means)).ppf(1-threshold))\n self.prob = [[0.0 for _ in range(0, self.max_demand +1)] for _ in range(self.n)]\n for i in range(self.n):\n norm_dist = norm(loc=self.means[i], scale = self.cv*self.means[i])\n for j in range(self.max_demand +1):\n self.prob[i][j] = norm_dist.pdf(j)\n self.prob[i] = self.prob[i]/sum(self.prob[i])\n\n def gen_non_stationary_poisson_demand(self, threshold): ## fix this VISE\n if len(self.means) != self.n:\n print(\"wrong size self.means vector\")\n return\n self.max_demand = int(poisson.ppf(loc = 0 , mu = max(self.means), q = (1-threshold)))\n self.prob = [[0.0 for _ in range(0, self.max_demand +1)] for _ in range(self.n)]\n for i in range(self.n):\n norm_dist = norm(loc=self.means[i], scale = self.cv*self.means[i])\n for j in range(self.max_demand +1):\n if self.means[i] != 0 :\n self.prob[i][j] = poisson.pmf(k = j, mu = self.means[i])\n else:\n self.prob[i][j] = poisson.pmf(k = j, mu = 0.1)\n\n self.prob[i] = self.prob[i]/sum(self.prob[i])\n\n def gen_means(self, type):\n self.means = [0 for _ in range(self.n)]\n if type == \"LCYA\":\n for i in range(self.n):\n self.means[i] = round(19 * math.exp(-(i-12)**2) / 5)\n elif type == \"SIN1\":\n for i in range(self.n):\n self.means[i] = round(70 * math.sin(0.8*(i+1)) +80)\n # Form Rossi et al. 2011\n elif type == \"P1\": # Form Rossi et al. 2011\n for i in range(self.n):\n self.means[i] = round(50 * (1 + math.sin(math.pi * i /6)))\n elif type == \"P2\": # Form Rossi et al. 2011\n for i in range(self.n):\n self.means[i] = round(50 * (1 + math.sin(math.pi * i /6))) + i\n elif type == \"P3\": # Form Rossi et al. 2011\n for i in range(self.n):\n self.means[i] = round(50*(1 + math.sin(math.pi * i /6))) + self.n - i\n elif type == \"P4\": # Form Rossi et al. 
2011\n for i in range(self.n):\n self.means[i] = round(50 * (1 + math.sin(math.pi * i /6))) + min(i, self.n-i)\n elif type == \"P5\": # Form Rossi et al. 2011\n for i in range(self.n):\n self.means[i] = random.randint(0,100)\n # NEW PATTERNS FOR THE PAPER\n elif type == \"STA\": # new patterns for the paper\n for i in range(self.n):\n self.means[i] = 50\n elif type == \"INC\":\n for i in range(self.n):\n self.means[i] = round(i * 100 / (self.n-1))\n elif type == \"DEC\":\n for i in range(self.n):\n self.means[i] = round(100 - i * 100 / (self.n-1))\n elif type == \"LCY1\":\n c = 0\n for i in range(int(self.n / 3)):\n self.means[i] = round((i + 0.5) * 225 / self.n)\n for i in range(int(self.n /3), 2 * int(self.n /3) + self.n % 3):\n self.means[i] = 75\n for i in range(2 * int(self.n /3) + self.n % 3, self.n):\n self.means[i] = round((int(self.n / 3) - 0.5 - c) * 225 / self.n)\n c+=1\n elif type == \"LCY2\":\n c = 0\n for i in range(int(self.n / 2)):\n self.means[i] = round(i * 100 / (int(self.n/2)-1))\n for i in range(int(self.n /2), self.n):\n self.means[i] = round(100 - c * 100 / (int(self.n/2+0.5)-1))\n c+=1\n elif type == \"ERR\":\n for i in range(self.n):\n self.means[i] = random.randint(0,100)\n elif type == \"SIN\":\n for i in range(self.n):\n self.means[i] = round(50 * (1 + math.sin(math.pi * i /6)))\n\n return self.means\n\n\n\n def gen_demand(self, t):\n val = random.random() # generate a number in [0.0, 1.0) range\n pdf = 0.0\n for i in range(0, self.max_demand+1):\n pdf = pdf + self.prob[t][i]\n if pdf >= val:\n return i\n return self.max_demand\n\n\n","sub_path":"instance.py","file_name":"instance.py","file_ext":"py","file_size_in_byte":6616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"76460646","text":"import sys\nsys.stdin = open(\"swimming_centre_input.txt\",\"r\")\n\ndef f(n,s,d,m,m3):\n global minV\n if n>12:\n if minV >s:\n minV=s\n \n else:\n f(n+1,s+table[n]*d,d,m,m3)\n f(n+1,s+m,d,m,m3)\n f(n+3,s+m3,d,m,m3)\n return minV\n\n\nT = int(input())\nfor tc in range(1,T+1):\n d,m,m3,y = map(int,input().split())\n table = [0] + list(map(int,input().split()))\n minV = y\n print(f(1,0,d,m,m3))\n","sub_path":"Solving Club/휴강기간/swimming_centre_live.py","file_name":"swimming_centre_live.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"333305934","text":"__module_name__ = 'environment'\n__module_version__ = '1.0'\n__module_description__ = 'Weather, time of day, local features, etc.'\n__module_author__ = 'Allen Stetson'\n\nfrom random import randint\nimport sys\nsys.path.insert(0, '/work/td/xchat/mod/')\nfrom dnd.utils import ProbabilityArray\nfrom dnd.locations import LocationGenerator\n\nclass EnvirGenerator(object):\n def __init__(self):\n # Keep these sorted... sorta.\n self.times = [\"dawn\", \"morning\", \"afternoon\", \"day\", \"dusk\", \"twilight\", \"evening\", \"midnight\", \"night\"]\n # Initialize settings.\n self.randNum = randint(1,100)\n self.timeOfDay = self.getTimeOfDay()\n self.temperature = self.getTemperature()\n self.conditionType = None\n self.condition = self.getCondition()\n self.nearbyFeature = None\n self.location = self.getLocation()\n self.moonPhase = self.getMoon() \n \n def getTimeOfDay(self):\n # night, dawn, morning, day, afternoon, dusk, twilight, evening, midnight\n self.timeOfDay = self.times[randint(0,len(self.times)-1)]\n return self.timeOfDay\n \n def getTemperature(self):\n # Warm, cold, hot, frigid, cool,\n temps = [('frigid',1),('cold',2),('cool',3),('warm',3),('hot',2),('blisteringly hot',1)]\n probabilityArray = ProbabilityArray()\n temps = probabilityArray.create(temps)\n if self.randNum == 1:\n temps = ['frigid','blisteringly hot']\n self.temperature = temps[randint(0,len(temps)-1)]\n return self.temperature\n \n def getCondition(self):\n # Sunny, windy, rainy, snowy, still, blustery, clear, torrentially rainy, blizzardy, foggy, smoky\n temp = self.temperature\n conditions = [('windy','wind'),('rainy','wet'),('still','norm'),('blustery','wind'),('clear','norm'),\n ('smoky','smoke'),('hazy','norm')]\n if temp == \"frigid\" or temp == \"cold\":\n conditions.extend([('hailing','cold'), ('snowy','cold'), ('blizzarding','cold'),\n ('torrentially rainy','cold'),('foggy','cold'),('drizzly','cold')])\n elif temp == \"hot\" or temp == \"blisteringly 
hot\":\n conditions.extend([('stagnant','hot'), ('dusty','hot')])\n else:\n conditions.extend([('torrentially rainy','wet'), ('foggy','norm'), ('drizzly','wet')])\n (self.condition,self.conditionType) = conditions[randint(0,len(conditions)-1)]\n return self.condition\n \n def getLocation(self, conditionType=None):\n if not conditionType:\n conditionType = self.conditionType\n locationGenerator = LocationGenerator(conditionType=conditionType)\n self.location = locationGenerator.location\n self.getNearbyFeature()\n return self.location\n \n def getNearbyFeature(self, location=None):\n if not location:\n location = self.location\n locationGenerator = LocationGenerator(location=location)\n self.nearbyFeature = locationGenerator.nearbyFeature\n return self.nearbyFeature\n \n def getMoon(self):\n phases = ['waxing','quarter','half','gibbous','full','waning','crescent','new']\n phase = phases[randint(0,len(phases)-1)]\n return phase\n \n def getWeather(self):\n \"\"\"\n Based on our initial \"roll of the dice\" (random int 0-100), if it is exceptionally low,\n death is imminent. Exceptionally high, and special things result. In the middle, and you\n get a more normal weather event.\n\n Within the normal weather event, things like the time of day and location are used to generate\n an appropriate event.\n :return: printable string for xchat\n :rtype: str\n \"\"\"\n # DEATH\n if self.randNum == 1:\n printStr = \"It is an unbearably %s %s. It takes all of your effort to stave off death from this %s weather, \" % (self.temperature,self.timeOfDay,self.condition)\n printStr += \"and soon your strength will run out. Your bones will litter the %s where you lay.\" % (self.location)\n return(printStr)\n # PARADISE\n elif self.randNum == 95:\n printStr = \"It is a perfect %s in the %s. The weather could not be more to your liking. 
\" % (self.timeOfDay, self.location)\n printStr += \"You can not help but wonder if you hold the favor of your gods, at the moment.\"\n return(printStr)\n # DENIAL\n elif self.randNum > 95:\n printStr = \"It is a quiet, air-conditioned %s in the workplace. \" % (self.timeOfDay)\n printStr += \"The ambient hum of machines fills the air with soothing white noise, its haze broken only by occasional echoes of coworker dialogue in the distant cubicles. \"\n printStr += \"You recline comfortably in your office chair, toggling idly between desktops as your mind drifts to far-off fantasy lands, \"\n printStr += \"pondering what the future will bring and \"\n if self.timeOfDay in self.times[:2]:\n printStr += \"what the fates have decreed for lunch today.\"\n elif self.timeOfDay in self.times[-2:]:\n printStr += \"when you can stop burning the midnight oil.\"\n else:\n printStr += \"digesting the plate of lunch you consumed earlier.\"\n return(printStr)\n # NORMAL (not DEATH, PARADISE, or DENIAL)\n else:\n # UNDERDARK\n if self.location == \"underdark\":\n printStr = \"It is a %s %s %s in the world above, though down here in the %s it is always the same.\" % (self.temperature, self.condition, self.timeOfDay, self.location)\n if self.conditionType == \"cold\":\n printStr += \" The cold from the %s seems to radiate downward, although the torches provide a comforting warmth in this otherwise \" % self.nearbyFeature\n printStr += \"unwelcoming world.\"\n elif self.conditionType == \"hot\":\n printStr += \" The heat from the %s seems to radiate downward, although the dark shadows provide a comforting respite in this otherwise \" % self.nearbyFeature\n printStr += \"unwelcoming world.\"\n else:\n printStr += \" The looming shadows, oppressive silence, and stench of this world make you long for the %s.\" % self.nearbyFeature\n return(printStr)\n # NOT UNDERDARK\n else:\n printStr = \"It is a %s %s %s in the %s.\" % (self.temperature, self.condition, self.timeOfDay, 
self.location)\n if self.location == \"coast\":\n printStr = printStr.replace(\" in \", \" on \")\n ## MOON\n if self.timeOfDay in ['night','twilight','evening','midnight']:\n if not self.conditionType == \"wet\":\n if not self.moonPhase == \"new\":\n typesOfGlows = ['a calming','an eerie','an otherworldly','a dreamy']\n glow = typesOfGlows[randint(0,len(typesOfGlows)-1)]\n printStr += \" The %s moon casts %s glow on the landscape.\" % (self.moonPhase, glow)\n if self.moonPhase == \"full\":\n printStr += \" The ominous howl of a nearby creature fills you with unease.\"\n ## WET\n if self.conditionType == \"wet\":\n printStr += \" Rain pelts the %s all around.\" % self.nearbyFeature\n if self.randNum < 4:\n printStr += \" Floodwaters rise ever higher. You search for nearby debris on which to cling for safety.\"\n elif self.randNum < 12:\n printStr += \" The deafening peal of thunder reverberates off of the %s nearby as blinding flash of lightning strikes not far away.\" % self.nearbyFeature\n elif self.temperature == \"hot\":\n printStr += \" The cool rain brings you welcome relief from the heat of the %s.\" % self.timeOfDay\n elif self.condition == \"rainy\":\n printStr += \" You turn your face to the sky and soak in the cleansing rain.\"\n else:\n printStr += \" You seek shelter anywhere you can in an attempt to stay dry.\"\n ## COLD\n elif self.conditionType == \"cold\":\n if self.randNum <= 30:\n printStr += \" A chill radiates off of the %s around you. You pull your coat close to your body in an attempt to keep warm.\" % self.nearbyFeature\n elif self.randNum < 50:\n printStr += \" You scowl at your empty tinderbox and useless flint as you plod through the %s trying to keep your body warm.\" % self.nearbyFeature\n elif self.randNum > 94:\n printStr += \" The cold seeping in through your armor could not feel better as it cools your sweaty, battle-worn muscles.\"\n else:\n printStr += \" Frost coats the nearby %s; your breath rises in visible puffs. 
You shiver involuntarily.\" % self.nearbyFeature\n ## WINDY\n elif self.conditionType == \"wind\":\n printStr += \" Winds beat the %s around you.\" % self.nearbyFeature\n if self.condition == \"blustery\":\n garmentType = ['garments','loincloth','cape','dark cloak','robes']\n garmentWorn = garmentType[randint(0,len(garmentType)-1)]\n printStr += \" You lean into the oncoming wind and push through, your %s flapping behind you.\" % garmentWorn\n else:\n hatType = ['hat','helmet','feather cap','hood','tricorn hat','head scarf']\n hatWorn = hatType[randint(0,len(hatType)-1)]\n actionType = ['soldier on', 'charge', 'dash', 'crawl', 'plod wearily', 'march']\n actionTaken = actionType[randint(0,len(actionType)-1)]\n printStr += \" You hold your %s with one hand as you %s into the wind.\" % (hatWorn, actionTaken)\n ## HOT\n elif self.conditionType == \"hot\" or self.temperature == \"blisteringly hot\":\n if self.randNum < 10:\n printStr += \" You impotently lick your sun-battered lips with your dry tongue as you feverishly look for any sign of fresh water.\"\n elif self.randNum < 20:\n printStr += \" A blinding light reflects off of the %s adjacent from you; a taunting reminder of the utter lack of respite from the heat.\" % self.nearbyFeature\n elif self.randNum > 95:\n printStr += \" The heat radiating off of the nearby %s makes the cool touch of the sparkling lake in which you swim all the more refreshing.\" % self.nearbyFeature\n else:\n printStr += \" An uncomfortable heat radiates off of the %s around you. 
Your sweaty clothes stick to you as you move.\" % self.nearbyFeature\n ## SMOKY\n elif self.condition == \"smoky\":\n thingsThatBurn = ['the nearby village',\"the enemy's encampment\", \"the vermin's nest\", 'what used to be your home','the weapons of war']\n if self.location == 'coast':\n thingsThatBurn.extend(['your defeated armada','the treasure galleon','the docks',\"the enemy's fleet\",'the shipwreck','the mermaid\\'s alcove'])\n elif self.location == 'mountains':\n thingsThatBurn.extend(['cliff dwellings',\"eagles' nests\",'the mining encampment','the log cabin'])\n elif self.location == 'grasslands':\n thingsThatBurn.extend(['the dry grass','the landscape','the catfolk village','the vineyard'])\n elif self.location == 'jungle':\n thingsThatBurn.extend([\"the hunter's perch\",'the hideout','the temple','the only bridge across the gaping chasm','the secret weapon'])\n elif self.location == 'city':\n thingsThatBurn.extend(['the tavern','the prison','the gallows','the district',\"the nobleman's abode\",'the slave quarters','the magic academy', \\\n 'the thieves guild','your home',\"that greedy bastard's home\",'the safehouse','the warehouse'])\n elif self.location == 'settlement':\n thingsThatBurn.extend(['the food storage','the church','the adjacent woods',\"the monster's body\",'the general store',\"the woodcutter's shop\"])\n elif self.location == 'fort':\n thingsThatBurn.extend([\"the captain's abode\",'the protective wall','the map room','the stables','the war machines','the weapons store',\"the raven's roost\"])\n elif self.location == 'marsh':\n thingsThatBurn.extend(['the hatchery', 'the dam', 'the coven house', 'the manticore den', 'the lizardfolk village',\"the green hag's cabin\"])\n elif self.location == 'meadow':\n thingsThatBurn.extend(['the windmill', 'the cottage'])\n elif self.location == 'forest':\n thingsThatBurn.extend(['the ettercap den'])\n elif self.location == 'hills':\n thingsThatBurn.extend([\"the hill giant's home\"])\n \n whatBurned = 
thingsThatBurn[randint(0,len(thingsThatBurn)-1)]\n \n if self.randNum < 4:\n printStr += \" The charred remains of %s mark your irrevocable decent into evil.\" % whatBurned\n elif self.randNum < 7:\n printStr += \" The charred remains of %s serve as a reminder of your unquestioning devotion to your deity.\" % whatBurned\n elif self.conditionType == 'cold' and self.randNum < 50:\n printStr += \" The smouldering remains of %s provide at least a little warmth to your otherwise numb body.\" % whatBurned\n elif self.conditionType == 'wet' and self.randNum < 50:\n printStr += \" The smouldering remains of %s sizzle with each drop of precipitation unlucky enough to target it.\" % whatBurned\n elif self.randNum < 30:\n printStr += \" The charred remains of %s scar the land.\" % whatBurned\n elif self.randNum < 60:\n printStr += \" The charred remains of %s fill you with pride.\" % whatBurned\n elif self.randNum < 80:\n printStr += \" The charred remains of %s fill you with rage.\" % whatBurned\n elif self.randNum < 99:\n printStr += \" The charred remains of %s fill you with sadness.\" % whatBurned\n elif self.randNum < 100:\n printStr += \" The thick hookah smoke still pleasantly burns your nostrils and massages your mind.\" % whatBurned\n \n return(printStr)\n","sub_path":"mod/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":14547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"59235497","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n# imports\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\n# read in data\ndf = pd.read_csv('./players_scores.csv').set_index('student_name')\nprint(df)\n\n\n# ## 1 Data Description\n\n# In[3]:\n\n\nscores = df['data_science_score']\n# scores = df['math_score']\n\n\n# #### 1. Calculate mean, median, and mode of Data Science scores\n\n# In[4]:\n\n\nprint('Mean: {}, Median: {}, Mode: {}'.format(\nscores.mean(), scores.median(), scores.mode().values))\n\n\n# #### 2. Calculate variance and standard deviation of Data Science scores\n\n# In[5]:\n\n\nprint('Variance: {}, Standard Deviation: {}'.format(\nscores.std()**2, scores.std()))\n\n\n# #### 3. Incremental Mean/Variance Functions\n\n# In[6]:\n\n\ndef incremental_mean(mu, n, x_new):\n return ((n*mu)+x_new)/(n+1)\n\n\n# In[7]:\n\n\ndef incremental_var(v, mu, n, x_new):\n mu_new = incremental_mean(mu, n, x_new)\n return ((n-1)*(v) + (x_new - mu)*(x_new - mu_new))/n\n\n\n# In[8]:\n\n\nu_prime = incremental_mean(mu=scores.mean(), n=len(scores), x_new=100)\nprint('u\\' = {}'.format(u_prime))\n\n\n# In[9]:\n\n\nv_prime = incremental_var(scores.std()**2, scores.mean(), len(scores), 100)\nprint('v\\' = {}'.format(v_prime))\n\n\n# #### Verify Function Correctness\n\n# In[10]:\n\n\nscores_plus = scores.append(pd.Series(100))\nprint('u\\' = {}'.format(scores_plus.mean()))\nprint('v\\' = {}'.format(scores_plus.std()**2))\n","sub_path":"ptinsley-HW1-Q1.py","file_name":"ptinsley-HW1-Q1.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"446582132","text":"# -*- coding: utf-8 -*-\n\"\"\"\n【概要】\n\n【注意】\n\n【動作環境】\nWindows / MacOS, python 2.7系列\n\n【作成者】\n芝(2016/04/04)\nshiba.shintaro@gmail.com\n\n【備考】\n\n\"\"\"\n\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\nimport scipy.fftpack\n\n\ndf = pd.read_csv(\"20160312_170222.csv\", header=9, names=[\"ch1\", \"ch2\", \"ch3\", \"ch4\", \"ch5\"])\n\nch1 = np.array(df[\"ch1\"].as_matrix())\nch2 = np.array(df[\"ch2\"].as_matrix())\ntrig = np.array(df[\"ch5\"].as_matrix())\n\n\n\nstart = 0 # サンプリングする開始位置\nN = 256 # FFTのサンプル数\n\nX = np.fft.fft(x[start:start+N]) # FFT\n# X = scipy.fftpack.fft(x[start:start+N]) # scipy\n\nfreqList = np.fft.fftfreq(N, d=1.0/fs) # 周波数軸の値を計算\n# freqList = scipy.fftpack.fftfreq(N, d=1.0/ fs) # scipy版\n\namplitudeSpectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in X] # 振幅スペクトル\nphaseSpectrum = [np.arctan2(int(c.imag), int(c.real)) for c in X] # 位相スペクトル\n\n# 波形を描画\nsubplot(311) # 3行1列のグラフの1番目の位置にプロット\nplot(range(start, start+N), x[start:start+N])\naxis([start, start+N, -1.0, 1.0])\nxlabel(\"time [sample]\")\nylabel(\"amplitude\")\n\n# 振幅スペクトルを描画\nsubplot(312)\nplot(freqList, amplitudeSpectrum, marker= 'o', linestyle='-')\naxis([0, fs/2, 0, 50])\nxlabel(\"frequency [Hz]\")\nylabel(\"amplitude spectrum\")\n\n# 位相スペクトルを描画\nsubplot(313)\nplot(freqList, phaseSpectrum, marker= 'o', linestyle='-')\naxis([0, fs/2, -np.pi, np.pi])\nxlabel(\"frequency [Hz]\")\nylabel(\"phase spectrum\")\n\nshow()\n","sub_path":"src/readLFP.py","file_name":"readLFP.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"575467999","text":"from scipy.spatial.distance import euclidean\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\nclass KNN():\n def __init__(self):\n self.X_train = None\n self.y_train = None\n #Given: X_train -> list of values\n # y_train -> list of different types of labels for values\n def fit(self, X_train, y_train):\n self.X_train = X_train\n self.y_train = y_train\n\n #Given: x -> a value\n #function: returns distances from that value\n def _get_distances(self, x):\n dists = []\n i = 0\n for y in self.X_train:\n dists.append((i, euclidean(x, y)))\n i += 1\n return dists\n #Given : dists -> list of distaces from point\n # k -> quantity of top distances wanted returned\n #get nearest neighbors\n #return k amount of them\n def _get_k_nearest(self, dists, k):\n k_nearest = sorted(dists, key = lambda x : x[1])\n return k_nearest[:k]\n\n #Given: none\n #Return the label that is most common from nearest neighbors\n def _get_label_prediction(self, k_nearest):\n labels = [self.y_train[i] for i, _ in k_nearest]\n return np.argmax(np.bincount(labels))\n\n #Given: X_test -> test values\n # k -> number of nearest neighbors\n #return list of labels for nearest neighbors at each test point\n def predict(self, X_test, k = 3):\n preds = []\n for x in X_test:\n dists = self._get_distances(x)\n nearest = self._get_k_nearest(dists, k)\n preds.append(self._get_label_prediction(nearest))\n return preds\n\n\niris = load_iris()\ndata = iris.data\ntarget = iris.target\n\nX_train, X_test, y_train, y_test = train_test_split(data, target, random_state = 0, train_size = 0.25)\n\nknn = KNN()\nknn.fit(X_train, y_train)\npreds = knn.predict(X_test, 3)\n\nprint(\"Testing Accuracy: {}\".format(accuracy_score(preds, 
y_test)))\n","sub_path":"week-8/knn/K-NearestNeighbors-Lab.py","file_name":"K-NearestNeighbors-Lab.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"438723959","text":"\r\noperator_attributes_dictionary = {\r\n '+': (1, False),\r\n '-': (1, True),\r\n '*': (2, False),\r\n '/': (2, False),\r\n '^': (3, False),\r\n '~': (6, True),\r\n '%': (4, False),\r\n '!': (6, True),\r\n '@': (5, False),\r\n '$': (5, False),\r\n '&': (5, False)\r\n}\r\n\r\n\r\ndef check_power(char_of_operator):\r\n (power, do_not_need) = operator_attributes_dictionary.get(char_of_operator)\r\n return power\r\n\r\n\r\ndef check_if_can_be_duplicated(char_of_operator):\r\n (do_not_need, can_be_duplicated) = \\\r\n operator_attributes_dictionary.get(char_of_operator)\r\n return can_be_duplicated\r\n","sub_path":"Omega/operator_attributes.py","file_name":"operator_attributes.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"588470101","text":"import numpy as np\nimport cv2\n\n# reading image from a particular location\n#It's always better to give the whole path of the image location\n#the format is img=imread(',offset) .. offset to be set to zero to get an grayscale version of image\n#if you just want to load the picture as it is there is no need to give offset value or set it to 255\nimg = cv2.imread('//Desktop/OpenCV/opencv/myprograms/iphone.jpeg',0) \n\n#displays the image\n#size of window is automatically set to fit the size of image\ncv2.imshow('image',img)\n\n#waitKey(0) function is necessary to keep the image window open\n#add & 0xFF only on 64 bit OS, not necessary for 32 bit one\nk = cv2.waitKey(0) & 0xFF\n\n#waiting for a keystroke\n#if ESC is pressed, image window just exits, 27 is ASCII code for ESC character\n#if key 's' is pressed image is saved to the the directory of existing rgb image and image window exits\nif k == 27: # wait for ESC key to exit\n cv2.destroyAllWindows()\nelif k == ord('s'): # wait for 's' key to save and exit\n cv2.imwrite('iphonegray.png',img)\n cv2.destroyAllWindows()\n","sub_path":"RGBtoGREY.py","file_name":"RGBtoGREY.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"177975235","text":"import logging\n\nfrom chatterbot.conversation import Statement\nfrom flask_restful import Resource\n\nfrom .nameparser import extract_names_regex\nfrom .parsers import query_parser, teach_parser, auth_parser\nfrom .logic.requests import Request, text, InputRequest, InvalidTokenError\nfrom common.utils import obscure\n\nlog = logging.getLogger(__name__)\n\n\nclass WaldurResource(Resource):\n \"\"\"\n Parent class of all Waldur resources\n \"\"\"\n def __init__(self, chatbot):\n \"\"\"\n :param chatbot: Chatterbot bot\n \"\"\"\n self.chatbot = chatbot\n self.response = None\n self.code = 200\n\n\nclass Query(WaldurResource):\n \"\"\"\n Resource to get answers from the underlying Chatterbot\n \"\"\"\n\n def __init__(self, chatbot, tokens_for_input):\n \"\"\"\n :param tokens_for_input: dict of {token: InputRequest, ...}\n \"\"\"\n super(Query, self).__init__(chatbot)\n self.tokens_for_input = tokens_for_input\n\n args = query_parser.parse_args()\n self.query = args.query\n self.token = None\n\n if args.Authorization is not None:\n if args.Authorization.startswith(\"token \"):\n self.token = args.Authorization[6:]\n else:\n self.token = args.Authorization\n\n log.info(f\"Query initialized with {{query: '{self.query}', token: '{obscure(self.token)}'}}\")\n\n if log.isEnabledFor(logging.DEBUG):\n obscured_tokens = {obscure(x): self.tokens_for_input[x] for x in self.tokens_for_input}\n log.debug(f\"Tokens waiting for input: {obscured_tokens}\")\n\n def post(self):\n \"\"\"\n Entry point for POST /\n Gets a response statement from the bot\n :param: query - question/input for bot\n :param: token - Waldur API token\n :return: response, code\n \"\"\"\n try:\n if self.token is not None and self.token in self.tokens_for_input:\n self._handle_input()\n else:\n self._handle_query()\n except InvalidTokenError:\n self.response = dict(message='Invalid Waldur API token')\n self.code = 401\n\n log.info(f\"Query response: {self.response} code: 
{self.code}\")\n return self.response, self.code\n\n def _handle_query(self):\n\n # Todo: Make it look better\n names_excluded = self.query\n for x in extract_names_regex(self.query):\n for splitted in x.split():\n names_excluded = names_excluded.replace(splitted, \"\").strip()\n names_excluded = \" \".join(names_excluded.split())\n \n bot_response = str(self.chatbot.get_response(names_excluded))\n log.debug(f\"Bot response: '{bot_response}'\")\n\n if bot_response.startswith(\"REQUEST\"):\n req = Request\\\n .from_string(bot_response)\\\n .set_token(self.token)\\\n .set_original(self.query)\n\n self.response = req.process()\n\n if isinstance(req, InputRequest):\n self.tokens_for_input[self.token] = req\n\n else:\n self.response = text(bot_response)\n\n def _handle_input(self):\n req = self.tokens_for_input[self.token]\\\n .set_input(self.query)\n\n self.response = req.process()\n\n if not req.waiting_for_input:\n del self.tokens_for_input[self.token]\n\n\nclass Teach(WaldurResource):\n \"\"\"\n Resource to give answers to the underlying Chatterbot\n \"\"\"\n\n def __init__(self, chatbot):\n super(Teach, self).__init__(chatbot)\n\n args = teach_parser.parse_args()\n self.statement = args.statement\n self.previous_statement = args.previous_statement\n\n log.info(f\"Teach initialized with {{statement: '{self.statement}', \"\n f\"previous_statement: '{self.previous_statement}'}}\")\n\n def post(self):\n \"\"\"\n Entry point for POST /teach/\n Teaches the bot that 'statement' is a valid response to 'previous_statement'\n :param: statement\n :param: previous_statement\n :return: response, code\n \"\"\"\n\n self.chatbot.learn_response(Statement(self.statement), Statement(self.previous_statement))\n return text(f\"Added '{self.statement}' as a response to '{self.previous_statement}'\"), 200\n\n\nclass Authenticate(Resource):\n \"\"\"\n Resource to intermediate token to frontend\n Not very secure\n \"\"\"\n\n def __init__(self, auth_tokens):\n \"\"\"\n :param auth_tokens: 
dict of {user_id: token, ...}\n \"\"\"\n self.auth_tokens = auth_tokens\n\n def post(self, user_id):\n \"\"\"\n Entry point for POST /auth/\n :param user_id: user_id to tie the token to\n :param: token from POST body\n :return: response, code\n \"\"\"\n args = auth_parser.parse_args()\n\n log.info(f\"token {obscure(args.token)} received for {user_id}\")\n\n self.auth_tokens[user_id] = args.token\n\n return {'message': 'ok'}, 200\n\n def get(self, user_id):\n \"\"\"\n Entry point for GET /auth/\n :param user_id: user_id to get the token for\n :return: response, code\n \"\"\"\n log.info(f\"token asked for {user_id}\")\n\n if user_id in self.auth_tokens:\n token = self.auth_tokens[user_id]\n del self.auth_tokens[user_id]\n return {'token': token}, 200\n else:\n return {'message': f\"No token for {user_id}\"}, 404\n","sub_path":"backend/waldur/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"121293852","text":"\n# coding: utf-8\n\n# In[3]:\n\n\n\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_recall_fscore_support as score\n\n\n\n\n# In[6]:\n\nclass accu_scores:\n def __init__(self,y_test, y_pred_class):\n self.y_test = y_test\n self.y_pred_class = y_pred_class\n self.simpleAccu()\n self.confMat()\n \n precision, recall, fscore, support = score(y_test, y_pred_class)\n\n print('precision: {}'.format(precision))\n print('recall: {}'.format(recall))\n print('fscore: {}'.format(fscore))\n print('support: {}'.format(support))\n def simpleAccu(self):\n print(\"simple accuracy : \",metrics.accuracy_score(self.y_test, self.y_pred_class,))\n def confMat(self):\n print(\"confusion matrix : \", metrics.confusion_matrix(self.y_test, self.y_pred_class))\n\n \n\n\n# In[ ]:\n\n\n\n","sub_path":"Multi Class/AccuracyMultiCLF.py","file_name":"AccuracyMultiCLF.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"494636187","text":"\"\"\"Sliding window maximum\nleetcode\n\nGiven an array nums, there is a sliding window of size k which is moving from\nthe very left of the array to the very right. You can only see the k numbers in\nthe window. Each time the sliding window moves right by one position.\n\nFor example,\nGiven nums = [1,3,-1,-3,5,3,6,7], and k = 3.\n\nWindow position Max\n--------------- -----\n[1 3 -1] -3 5 3 6 7 3\n 1 [3 -1 -3] 5 3 6 7 3\n 1 3 [-1 -3 5] 3 6 7 5\n 1 3 -1 [-3 5 3] 6 7 5\n 1 3 -1 -3 [5 3 6] 7 6\n 1 3 -1 -3 5 [3 6 7] 7\nTherefore, return the max sliding window as [3,3,5,5,6,7].\n\"\"\"\n\nfrom collections import deque\n\nclass Solution(object):\n def maxSlidingWindow(self, nums, k):\n \"\"\"Return sequence of max value of all slicding windows from left to right.\n\n 1. Use deque for window so popleft is really O(1). For list it's O(n)\n 2. Keep tracking max window value. If the element is about to enqueue is\n larger or equal than max value, update max value. 
If the element is about\n to be dequed is equal to max value, do the max for new window\n\n The worst case is O(kn), e.g., [5, 4, 3, 2, 1] because every time you move the\n window, the popped one is max value of current window then max will be called\n If Queue.PriofityQueue is used to keep track of window, you can have max with\n O(n), but it's hard to remove element by it's index.\n\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n if not nums:\n return []\n\n if len(nums) < k:\n raise ValueError('Invalid window size: {}'.format(k))\n\n window = deque(nums[:k])\n current_max = max(window)\n max_windows = [current_max]\n\n for i, val in enumerate(nums, start=k):\n popped = window.popleft()\n window.append(val)\n if val >= current_max:\n current_max = val\n elif popped == current_max:\n current_max = max(window)\n else:\n pass\n\n max_windows.append(current_max)\n\n return max_windows[k:]\n\n\ndef main():\n sol = Solution()\n nums = [1, 3, -1, -3, 5, 3, 6, 7]\n k = 3\n assert sol.maxSlidingWindow(nums, k) == [3, 3, 5, 5, 6, 7]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"arr/leetcode_Sliding_Window_Maximum.py","file_name":"leetcode_Sliding_Window_Maximum.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"281502196","text":"import threading\n\nfrom villas.node.node import Node\nfrom villas.controller.components.manager import Manager\nfrom villas.controller.components.gateways.villas_node import VILLASnodeGateway\n\n\nclass VILLASnodeManager(Manager):\n\n def __init__(self, **args):\n\n self.autostart = args.get('autostart', False)\n self.api_url = args.get('api_url', 'http://localhost:8080')\n self.api_url_external = args.get('api_url_external', self.api_url)\n\n args['api_url'] = self.api_url\n\n self.thread_stop = threading.Event()\n self.thread = threading.Thread(target=self.reconcile_periodically)\n\n self.node = Node(**args)\n\n self._status = self.node.status\n\n args['uuid'] = self._status.get('uuid')\n\n super().__init__(**args)\n\n def reconcile_periodically(self):\n while not self.thread_stop.wait(2):\n self.reconcile()\n\n def reconcile(self):\n try:\n self._status = self.node.status\n self._nodes = self.node.nodes\n\n for node in self._nodes:\n self.logger.debug('Found node %s on gateway: %s',\n node['name'], node)\n\n if node['uuid'] in self.components:\n ic = self.components[node['uuid']]\n\n # Update state\n ic.change_state(node['state'])\n else:\n ic = VILLASnodeGateway(self, node)\n\n self.add_component(ic)\n\n self.change_state('running')\n\n except Exception as e:\n self.change_to_error('failed to reconcile',\n exception=str(e),\n args=str(e.args))\n\n @property\n def status(self):\n status = super().status\n\n status['status']['villas_node_version'] = self._status.get('version')\n\n return status\n\n def on_ready(self):\n if self.autostart and not self.node.is_running():\n self.start()\n\n self.thread.start()\n\n super().on_ready()\n\n def on_shutdown(self):\n self.thread_stop.set()\n self.thread.join()\n\n return super().on_shutdown()\n\n def start(self, payload):\n self.node.start()\n\n self.change_state('starting')\n\n def stop(self, payload):\n if self.node.is_running():\n self.node.stop()\n\n self.change_state('idle')\n\n # Once 
the gateway shutsdown, all the gateway nodes are also shutdown\n for node in self.nodes:\n node.change_state('shutdown')\n\n def pause(self, payload):\n self.node.pause()\n\n self.change_state('paused')\n\n # Once the gateway shutsdown, all the gateway nodes are also shutdown\n for node in self.nodes:\n node.change_state('paused')\n\n def resume(self, payload):\n self.node.resume()\n\n def reset(self, payload):\n self.node.restart()\n","sub_path":"villas/controller/components/managers/villas_node.py","file_name":"villas_node.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"132058052","text":"assignments = []\nrows = 'ABCDEFGHI'\ncols = '123456789'\n\ndef cross(A, B):\n \"\"\"Cross product of elements in A and elements in B\n \"\"\"\n return [s+t for s in A for t in B]\n\n# set up all boxe labels, units, and peers dictionary\nboxes = cross(rows, cols)\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\ndiagonal_units = [list(map(lambda x:\"\".join(x),list(zip(rows,cols)))),list(map(lambda x:\"\".join(x),list(zip(rows,reversed(cols)))))]\nunitlist = row_units + column_units + square_units + diagonal_units # list of all units\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes) # mapping of box -> the units the box belongs to\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes) # mapping of box -> the peers of the box\n\ndef assign_value(values, box, value):\n \"\"\"\n Please use this function to update your values dictionary!\n Assigns a value to a given box. 
If it updates the board record it.\n \"\"\"\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values\n\ndef naked_twins(values):\n \"\"\"Eliminate values using the naked twins strategy.\n Args:\n values(dict): a dictionary of the form {'box_name': '123456789', ...}\n\n Returns:\n the values dictionary with the naked twins eliminated from peers.\n \"\"\"\n\n # Find all instances of naked twins\n for unit in unitlist:\n for i in range(9):\n box1 = unit[i]\n if len(values[box1]) != 2:\n continue\n for j in range(i + 1, 9):\n box2 = unit[j]\n if values[box1] == values[box2]: # found twins\n for peer in (set(unit) - set([box1, box2])): # eliminate the naked twins as possibilities for their peers\n for digit in values[box1]:\n values = assign_value(values, peer, values[peer].replace(digit,''))\n break\n\n return values\n\ndef grid_values(grid):\n \"\"\"\n Convert grid into a dict of {square: char} with '123456789' for empties.\n Args:\n grid(string) - A grid in string form.\n Returns:\n A grid in dictionary form\n Keys: The boxes, e.g., 'A1'\n Values: The value in each box, e.g., '8'. 
If the box has no value, then the value will be '123456789'.\n \"\"\"\n assert len(grid)==81\n available = '123456789'\n values = []\n for c in grid:\n if c == '.':\n values.append(available)\n else:\n values.append(c)\n return dict(zip(boxes,values))\n\ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n Args:\n values(dict): The sudoku in dictionary form\n \"\"\"\n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return\n\ndef eliminate(values):\n \"\"\"Eliminate values from peers of each box with a single value.\n\n Go through all the boxes, and whenever there is a box with a single value,\n eliminate this value from the set of values of all its peers.\n\n Args:\n values: Sudoku in dictionary form.\n Returns:\n Resulting Sudoku in dictionary form after eliminating values.\n \"\"\"\n \n for box in values:\n if len(values[box]) == 1:\n digit = values[box]\n for peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(digit,'')) # eliminate this digit from values of peers\n \n return values\n\ndef only_choice(values):\n \"\"\"Finalize all values that are the only choice for a unit.\n\n Go through all the units, and whenever there is a unit with a value\n that only fits in one box, assign the value to this box.\n\n Args: \n values: Sudoku in dictionary form.\n Returns: \n Resulting Sudoku in dictionary form after filling in only choices.\n \"\"\"\n \n for unit in unitlist:\n digits = '123456789'\n for digit in digits:\n dplaces = [box for box in unit if digit in values[box]] # list of boxes where this digit is allowed in current unit\n if len(dplaces) == 1: # if there is only one box this digit could go in this unit\n values = assign_value(values, dplaces[0], digit) # assign this digit to this box\n\n return values\n\ndef reduce_puzzle(values):\n \"\"\"Reduce the 
search space for puzzle by repetitively applying eliminate, only choice, and naked twins strategy\n Stop iteration when there is no further progress\n Args: \n values: Sudoku in dictionary form.\n Returns:\n Resulting Sudoku after options are reduced\n \"\"\"\n stalled = False\n while not stalled:\n # Check how many boxes have a determined value\n solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n\n # Use the Eliminate Strategy to confine search space\n values = eliminate(values)\n\n # Use the Only Choice Strategy to confine search space\n values = only_choice(values)\n \n # Use the Naked Twins Strategy to confine search space\n values = naked_twins(values)\n\n # Check how many boxes have a determined value, to compare\n solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n # If no new values were added, stop the loop.\n stalled = solved_values_before == solved_values_after\n # Sanity check, return False if there is a box with zero available values:\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n\ndef get_best_box(values):\n \"\"\"identify the best box for starting depth first search, which is an unsolved box containing least options\n Args: \n values: Sudoku in dictionary form.\n Returns:\n string representing the best box to start depth first search\n \"\"\"\n min_len, best_box = min([(len(value),box)for box,value in values.items() if len(value) > 1])\n return best_box\n\n\ndef is_solved(values):\n \"\"\"determine whether a puzzle has been solved or not\n Args: \n values: Sudoku in dictionary form.\n Returns:\n bool: True for solved, False for unsolved\n \"\"\"\n unsolved_values = len([box for box in values.keys() if len(values[box]) != 1])\n return(unsolved_values == 0)\n\n\ndef search(values):\n \"\"\"Using depth-first search and propagation, create a search tree and solve the sudoku.\n Args: \n values: Sudoku in dictionary form.\n Returns:\n 
dictionary representing solved sodoku, or False if no solution is found\n \"\"\"\n # First, reduce the puzzle\n values = reduce_puzzle(values)\n \n # If previous step hits a dead-end, returns False\n if values is False:\n return False\n \n # If puzzle is solved, return the answer\n if is_solved(values):\n return values\n \n # Choose one of the unfilled squares with the fewest possibilities\n best_box = get_best_box(values)\n \n # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\n for digit in values[best_box]: # choose a possible value\n new_values = values.copy() # make a copy of the current puzzle for backtracking\n values[best_box] = digit # assign that value to the box\n ans = search(new_values) # start searching\n if ans: \n return ans # returns answer if puzzle is solved\n\n return False # sanity check, returns False if no solution is found\n \n\ndef solve(grid):\n \"\"\"\n Find the solution to a Sudoku grid.\n Args:\n grid(string): a string representing a sudoku grid.\n Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns:\n The dictionary representation of the final sudoku grid. False if no solution exists.\n \"\"\"\n \n values = grid_values(grid)\n sudoku = search(values)\n if sudoku:\n return sudoku\n \n\nif __name__ == '__main__':\n \n grid = \"..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..\"\n \n diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n display(solve(diag_sudoku_grid))\n\n try:\n from visualize import visualize_assignments\n visualize_assignments(assignments)\n\n except SystemExit:\n pass\n except:\n print('We could not visualize your board due to a pygame issue. Not a problem! 
It is not a requirement.')\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":8793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"97946405","text":"from django.urls import path\r\nfrom django.contrib import admin\r\n\r\nfrom .views import testview,login_view,register_view,logout_view\r\n\r\napp_name=\"translators\"\r\n\r\nurlpatterns = [\r\n \r\n path('', testview),\r\n path('login/',login_view, name='login'),\r\n path('register/', register_view, name='register'),\r\n path('logout/', logout_view, name='logout'),\r\n]","sub_path":"src/translators/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"259658251","text":"import re\nimport openpyxl as op\ndef exceliando():\n\texcel=op.load_workbook(\"nuevoenviolibrosya.xlsx\")\n\thoja=excel['Hoja1']\n\tlargo=len(hoja['A'])+1\n\tfor x in range(2,largo):\n\t\ttitul=hoja.cell(row=x,column=2).value\n\t\tprint(titul)\n\t\tprint(type(titul))\n\t\thoja.cell(row=x,column=2).value=corrector_titulo(titul)\n\t\tautor=hoja.cell(row=x,column=3).value\n\t\thoja.cell(row=x,column=3).value=corrector_titulo(autor)\n\n\texcel.save(\"nuevoenviolibrosya.xlsx\")\t\ndef corrector_titulo(pretitulo):\n\tretitulo=re.search(r\"(.*), ([LE][A-Z]\\w?)(.*)\", pretitulo)\n\tif retitulo == None:\n\t\tprepre=pretitulo\n\telse:\n\t\ttitulo=(retitulo[2]+\" \"+retitulo[1]+\" \"+retitulo[3]).strip()\n\t\tprepre=titulo\n\tretitulo2=re.search(r\"([\\w 0-9]*)(/L)([\\w 0-9]*)\",prepre)\n\tif retitulo2 == None:\n\t\treturn prepre.strip()\n\telse:\n\t\ttitulo=(retitulo2[1]+retitulo2[3]).strip()\n\t\treturn titulo\ndef corrector_autor(preautor):\t\n\treautor = re.search(r\"([\\w ]*), ([\\w ]*)\",preautor)\n\tif reautor == None:\n\t\treturn (preautor.strip(),preautor.strip())\n\telse:\t\n\t\tautor=reautor[2]+\" \"+reautor[1]\n\t\tapellido=reautor[1]\n\t\treturn (autor, apellido)\n#exceliando()\t\t","sub_path":"Principal/correctortitulo.py","file_name":"correctortitulo.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"231631036","text":"import time\nimport logging\nimport os\nimport uuid\nimport traceback\nimport pprint\nimport numpy as np\n\nfrom .advisor import make_advisor\n\nlogger = logging.getLogger(__name__)\n\nclass InvalidAdvisorException(Exception):\n pass\n\nclass InvalidProposalException(Exception):\n pass\n\nclass AdvisorService(object):\n def __init__(self):\n self._advisors = {}\n\n def create_advisor(self, knob_config, advisor_id=None):\n is_created = False\n advisor = None\n\n if advisor_id is not None:\n advisor = self._get_advisor(advisor_id)\n \n if advisor is None:\n advisor_inst = make_advisor(knob_config)\n advisor = self._create_advisor(advisor_inst, knob_config, advisor_id)\n is_created = True\n\n return {\n 'id': advisor.id,\n 'is_created': is_created # Whether a new advisor has been created\n }\n\n def delete_advisor(self, advisor_id):\n is_deleted = False\n\n advisor = self._get_advisor(advisor_id)\n\n if advisor is not None:\n self._delete_advisor(advisor)\n is_deleted = True\n\n return {\n 'id': advisor_id,\n # Whether the advisor has been deleted (maybe it already has been deleted)\n 'is_deleted': is_deleted \n }\n\n def generate_proposal(self, advisor_id):\n advisor = self._get_advisor(advisor_id)\n knobs = self._generate_proposal(advisor)\n\n return {\n 'knobs': knobs\n }\n\n # Feedbacks to the advisor on the score of a set of knobs\n # Additionally, returns another proposal of knobs after ingesting feedback\n def feedback(self, advisor_id, knobs, score):\n advisor = self._get_advisor(advisor_id)\n\n if advisor is None:\n raise InvalidAdvisorException()\n\n advisor_inst = advisor.advisor_inst\n advisor_inst.feedback(knobs, score)\n knobs = self._generate_proposal(advisor)\n\n return {\n 'knobs': knobs\n }\n\n def _create_advisor(self, advisor_inst, knob_config, advisor_id=None):\n advisor = Advisor(advisor_inst, knob_config, advisor_id)\n self._advisors[advisor.id] = advisor\n return advisor\n\n def _get_advisor(self, advisor_id):\n 
if advisor_id not in self._advisors:\n return None\n\n advisor = self._advisors[advisor_id]\n return advisor\n\n def _update_advisor(self, advisor, advisor_inst):\n advisor.advisor_inst = advisor_inst\n return advisor\n\n def _delete_advisor(self, advisor):\n del self._advisors[advisor.id]\n\n def _generate_proposal(self, advisor):\n knobs = advisor.advisor_inst.propose()\n\n # Simplify knobs to use JSON serializable values\n knobs = {\n name: self._simplify_value(value)\n for name, value\n in knobs.items()\n }\n\n return knobs\n\n def _simplify_value(self, value):\n # TODO: Support int64 & other non-serializable data formats\n if isinstance(value, np.int64):\n return int(value)\n\n return value\n\nclass Advisor(object):\n def __init__(self, advisor_inst, knob_config, advisor_id=None):\n if advisor_id is not None:\n self.id = advisor_id\n else:\n self.id = str(uuid.uuid4())\n \n self.advisor_inst = advisor_inst\n self.knob_config = knob_config\n","sub_path":"rafiki/advisor/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"484591488","text":"import numpy as np\n\ndef rgb2gray(image):\n size = image.shape\n if size[2] == 3:\n imout = np.zeros([size[0], size[1]])\n imout[:, :] = image[:, :, 1]\n return imout\n else:\n print(\"wrong image format\")\n return image","sub_path":"8 Year 3 University Python Head Pose Estimation Project/rgb2gray.py","file_name":"rgb2gray.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"650462616","text":"\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy.stats as sta\r\nimport statsmodels.formula.api as smf\r\nfrom matplotlib.patches import Ellipse\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import (accuracy_score, mean_absolute_error,\r\n mean_squared_error, r2_score)\r\nfrom sklearn.model_selection import KFold, train_test_split\r\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\r\nfrom statsmodels.stats.outliers_influence import summary_table\r\n\r\n# reading and loading into dataframe\r\ndf = pd.read_csv('P1training.csv')\r\ndf['date'] = pd.to_datetime(df['date'])\r\ndf['hour'] = pd.to_datetime(df['date']).dt.hour\r\n#df['day']= pd.to_datetime(df['date']).dt.dayofweek\r\ndf.head()\r\n\r\npvalue_threshold = 0.05\r\n\r\n\r\ndef forward_selected(data, response):\r\n \"\"\"Linear model designed by forward selection.\r\n\r\n Parameters:\r\n -----------\r\n data : pandas DataFrame with all possible predictors and response\r\n\r\n response: string, name of response column in data\r\n\r\n Returns:\r\n --------\r\n model: an \"optimal\" fitted statsmodels linear model\r\n with an intercept\r\n selected by forward selection\r\n evaluated by RMSE\r\n \"\"\"\r\n cols = [col for col in df.columns if response not in col]\r\n cols = [col for col in feature_cols if 'date' not in col]\r\n counter = cols.copy()\r\n # list to store selected predictors\r\n selected = []\r\n # variables to tore score:\r\n best = 0.0\r\n current = 0.0\r\n\r\n # split into test train\r\n kf = KFold(n_splits=10, shuffle=True, random_state=2)\r\n result = next(kf.split(df), None)\r\n train = df.iloc[result[0]]\r\n test = df.iloc[result[1]]\r\n # constructure formua\r\n for feat in cols:\r\n selected.append(feat)\r\n formula = \"{0} ~ 1 + {1}\".format(response, ' + '.join(selected))\r\n model = smf.ols(formula, data=train).fit()\r\n y_pred = 
model.predict(test[selected])\r\n print('RMSE:', np.sqrt(\r\n mean_squared_error(y_pred, test['Appliances'])))\r\n current = np.sqrt(mean_squared_error(y_pred, test['Appliances']))\r\n if best == 0.0:\r\n best = current\r\n print('best', best)\r\n if current > best:\r\n print('current more than best')\r\n print('currnet', current)\r\n print('best', best)\r\n selected.remove(feat)\r\n else:\r\n best = current\r\n pvalues = model.pvalues\r\n print('pvalues {}\\n'.format(pvalues))\r\n p_dict = pvalues.to_dict()\r\n for key, value in p_dict.items():\r\n if value >= pvalue_threshold:\r\n print('keys to remove',key)\r\n if key == 'Intercept':\r\n continue\r\n else:\r\n print('removing...')\r\n selected.remove(key)\r\n\r\n formula = \"{} ~ 1+ {}\".format(response,\r\n ' + '.join(selected))\r\n print(formula)\r\n model = smf.ols(formula, data=train).fit()\r\n print(model.summary())\r\n print('\\nParams:\\n', model.params)\r\n print('\\nPvalues:\\n', model.pvalues)\r\n predictions = model.predict(test[selected])\r\n print('RMSE:', np.sqrt(\r\n mean_squared_error(predictions, test[response])))\r\n return model\r\n\r\n\r\n# determine predictors\r\nfeature_cols = [col for col in df.columns if 'Appliances' not in col]\r\n\r\nmodel = forward_selected(df, 'Appliances')\r\n","sub_path":"applied_forecasting/forward_selection.py","file_name":"forward_selection.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"554915455","text":"from abc import ABCMeta, abstractmethod, abstractproperty\n\nimport numpy as np\nimport statsmodels.stats.multitest as mc\n\nfrom ..base import MetaResult\nfrom ..stats import p_to_z\n\n\nclass Corrector(metaclass=ABCMeta):\n '''\n Base class for multiple comparison correction methods.\n '''\n\n # The name of the method that must be implemented in an Estimator class\n # in order to override the default correction method.\n _correction_method = None\n\n # Maps that must be available in the MetaResult instance\n _required_maps = ('p',)\n\n def __init__(self):\n pass\n\n @abstractproperty\n def _name_suffix(self):\n pass\n\n def _validate_input(self, result):\n if not isinstance(result, MetaResult):\n raise ValueError(\"First argument to transform() must be an \"\n \"instance of class MetaResult, not {}.\"\n .format(type(result)))\n for rm in self._required_maps:\n if result.maps.get(rm) is None:\n raise ValueError(\"{0} requires {1} maps to be present in the \"\n \"MetaResult, but none were found.\"\n .format(type(self), rm))\n\n def _generate_secondary_maps(self, result, corr_maps):\n # Generates corrected version of z and log-p maps if they exist\n p = corr_maps['p']\n if 'z' in result.maps:\n corr_maps['z'] = p_to_z(p) * np.sign(result.maps['z'])\n if 'log_p' in result.maps:\n corr_maps['log_p'] = -np.log10(p)\n return corr_maps\n\n def transform(self, result):\n est = result.estimator\n correction_method = self._correction_method + '_' + self.method\n\n # Make sure we return a copy of the MetaResult\n result = result.copy()\n\n # If a correction method with the same name exists in the current\n # MetaEstimator, use it. 
Otherwise fall back on _transform.\n if (correction_method is not None and hasattr(est, correction_method)):\n corr_maps = getattr(est, correction_method)(result, **self.parameters)\n else:\n self._validate_input(result)\n corr_maps = self._transform(result)\n\n # Update corrected map names and add them to maps dict\n corr_maps = {(k + self._name_suffix): v for k, v in corr_maps.items()}\n result.maps.update(corr_maps)\n\n return result\n\n @abstractmethod\n def _transform(self, result, **kwargs):\n # Must return a dictionary of new maps to add to .maps, where keys are\n # map names and values are the maps. Names must _not_ include\n # the _name_suffix:, as that will be added in transform() (i.e.,\n # return \"p\" not \"p_corr-FDR_q-0.05_method-indep\").\n pass\n\n\nclass FWECorrector(Corrector):\n \"\"\"\n Perform family-wise error rate correction on a meta-analysis.\n\n Parameters\n ----------\n method : `obj`:str\n The FWE correction to use. Either 'bonferroni' or 'permutation'.\n **kwargs\n Keyword arguments to be used by the FWE correction implementation.\n \"\"\"\n\n _correction_method = '_fwe_correct'\n\n def __init__(self, method='bonferroni', **kwargs):\n self.method = method\n self.parameters = kwargs\n\n @property\n def _name_suffix(self):\n return '_corr-FWE_method-{}'.format(self.method)\n\n def _transform(self, result):\n p = result.maps['p']\n _, p_corr, _, _ = mc.multipletests(p, alpha=0.05, method=self.method,\n is_sorted=False)\n corr_maps = {'p': p_corr}\n self._generate_secondary_maps(result, corr_maps)\n return corr_maps\n\n\nclass FDRCorrector(Corrector):\n \"\"\"\n Perform false discovery rate correction on a meta-analysis.\n\n Parameters\n ----------\n q : `obj`:float\n The FDR correction rate to use.\n method : `obj`:str\n The FDR correction to use. 
Either 'indep' (for independent or\n positively correlated values) or 'negcorr' (for general or negatively\n correlated tests).\n \"\"\"\n\n _correction_method = '_fdr_correct'\n\n def __init__(self, method='indep', **kwargs):\n self.method = method\n self.parameters = kwargs\n\n @property\n def _name_suffix(self):\n return '_corr-FDR_method-{}'.format(self.method)\n\n def _transform(self, result):\n p = result.maps['p']\n _, p_corr = mc.fdrcorrection(p, alpha=self.q, method=self.method,\n is_sorted=False)\n corr_maps = {'p': p_corr}\n self._generate_secondary_maps(result, corr_maps)\n return corr_maps\n","sub_path":"nimare/correct/correct.py","file_name":"correct.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"367311734","text":"import numpy as np\nfrom scipy.optimize.optimize import approx_fprime\n\nfrom utils import ensure_1d\n\n\"\"\"\nImplementation of function objects.\nFunction objects encapsulate the behaviour of an objective function that we optimize.\nSimply put, implement evaluate(w, X, y) to get the numerical values corresponding to:\nf, the function value (scalar) and\ng, the gradient (vector).\n\nFunction objects are used with optimizers to navigate the parameter space and\nto find the optimal parameters (vector). See optimizers.py.\n\"\"\"\n\n\nclass FunObj:\n \"\"\"\n Function object for encapsulating evaluations of functions and gradients\n \"\"\"\n\n def evaluate(self, w, X, y):\n \"\"\"\n Evaluates the function AND its gradient w.r.t. w.\n Returns the numerical values based on the input.\n IMPORTANT: w is assumed to be a 1d-array, hence shaping will have to be handled.\n \"\"\"\n raise NotImplementedError(\"This is a base class, don't call this\")\n\n def check_correctness(self, w, X, y):\n n, d = X.shape\n estimated_gradient = approx_fprime(\n w, lambda w: self.evaluate(w, X, y)[0], epsilon=1e-6\n )\n _, implemented_gradient = self.evaluate(w, X, y)\n difference = estimated_gradient - implemented_gradient\n if np.max(np.abs(difference) > 1e-4):\n print(\n \"User and numerical derivatives differ: %s vs. 
%s\"\n % (estimated_gradient, implemented_gradient)\n )\n else:\n print(\"User and numerical derivatives agree.\")\n\n\nclass LeastSquaresLoss(FunObj):\n def evaluate(self, w, X, y):\n \"\"\"\n Evaluates the function and gradient of least squares objective.\n Least squares objective is the sum of squared residuals.\n \"\"\"\n # help avoid mistakes by potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n y_hat = X @ w\n m_residuals = y_hat - y # minus residuals, slightly more convenient here\n\n # Loss is sum of squared residuals\n f = 0.5 * np.sum(m_residuals ** 2)\n\n # The gradient, derived mathematically then implemented here\n g = X.T @ m_residuals # X^T X w - X^T y\n\n return f, g\n\n\nclass RobustRegressionLoss(FunObj):\n def evaluate(self, w, X, y):\n \"\"\"\n Evaluates the function and gradient of ROBUST least squares objective.\n \"\"\"\n # help avoid mistakes by potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n y_hat = X @ w\n residuals = y - y_hat\n exp_residuals = np.exp(residuals)\n exp_minuses = np.exp(-residuals)\n\n f = np.sum(np.log(exp_minuses + exp_residuals))\n\n # s is the negative of the \"soft sign\"\n s = (exp_minuses - exp_residuals) / (exp_minuses + exp_residuals)\n g = X.T @ s\n\n return f, g\n\n\nclass LogisticRegressionLoss(FunObj):\n def evaluate(self, w, X, y):\n \"\"\"\n Evaluates the function and gradient of logistics regression objective.\n \"\"\"\n # help avoid mistakes by potentially reshaping our arguments\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n Xw = X @ w\n yXw = y * Xw # element-wise multiply; the y_i are in {-1, 1}\n\n # Calculate the function value\n f = np.sum(np.log(1 + np.exp(-yXw)))\n\n # Calculate the gradient value\n s = -y / (1 + np.exp(yXw))\n g = X.T @ s\n\n return f, g\n\n\nclass LogisticRegressionLossL2(LogisticRegressionLoss):\n def __init__(self, lammy):\n super().__init__()\n self.lammy = lammy\n\n def evaluate(self, w, X, y):\n w = ensure_1d(w)\n y = 
ensure_1d(y)\n\n \"\"\"YOUR CODE HERE FOR Q2.1\"\"\"\n raise NotImplementedError()\n\n\nclass LogisticRegressionLossL0(FunObj):\n def __init__(self, lammy):\n self.lammy = lammy\n\n def evaluate(self, w, X, y):\n \"\"\"\n Evaluates the function value of of L0-regularized logistics regression objective.\n \"\"\"\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n Xw = X @ w\n yXw = y * Xw # element-wise multiply\n\n # Calculate the function value\n f = np.sum(np.log(1.0 + np.exp(-yXw))) + self.lammy * np.sum(w != 0)\n\n # We cannot differentiate the \"length\" function\n g = None\n return f, g\n\n\nclass SoftmaxLoss(FunObj):\n def evaluate(self, w, X, y):\n w = ensure_1d(w)\n y = ensure_1d(y)\n\n n, d = X.shape\n k = len(np.unique(y))\n\n \"\"\"YOUR CODE HERE FOR Q3.4\"\"\"\n # Hint: you may want to use NumPy's reshape() or flatten()\n # to be consistent with our matrix notation.\n raise NotImplementedError()\n","sub_path":"assignments/a4/code/fun_obj.py","file_name":"fun_obj.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"635642311","text":"from __future__ import unicode_literals\n\nfrom django.views.generic import ListView, DetailView\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom django.contrib.auth.models import User\nfrom photo.models import Photo, ComplaintAtPhoto, get_countries_list\nfrom photo.forms import AddPhotoForm\n\n\nclass PhotoListView(ListView):\n model = Photo\n context_object_name = 'photos'\n template_name = 'base_photo.html'\n ordering = '-pk'\n paginate_by = 4\n\n def get_queryset(self):\n selected_country = self.request.GET.get('selected_country')\n if selected_country and selected_country in get_countries_list():\n photos = Photo.objects.filter(country=selected_country)\n else:\n photos = Photo.objects.all()\n return photos\n\n def get_context_data(self, **kwargs):\n context = super(PhotoListView, self).get_context_data(**kwargs)\n context['get_countries_list'] = get_countries_list()\n context['get_photos_on_map'] = Photo.objects.all()\n if self.request.GET:\n try:\n context['get_photos_on_map'] = Photo.objects.filter(country=self.request.GET.get('selected_country'))\n except:\n context['get_photos_on_map'] = Photo.objects.all()\n try:\n context['view_type'] = self.request.GET['view_type']\n except:\n context['view_type'] = ''\n try:\n context['selected_country'] = self.request.GET['selected_country']\n except:\n context['selected_country'] = ''\n return context\n\n\nclass PhotoDetailView(DetailView):\n model = Photo\n\n def get_context_data(self, **kwargs):\n context = super(PhotoDetailView, self).get_context_data(**kwargs)\n return context\n\n\ndef add_photo(request):\n if request.method == 'POST':\n form = AddPhotoForm(request.POST, request.FILES)\n try:\n if form.is_valid() and request.user.is_authenticated():\n obj = form.save(commit=False)\n obj.created_by = request.user\n obj.save()\n return 
HttpResponseRedirect('/photos/add-photo')\n except ObjectDoesNotExist:\n return HttpResponseRedirect('/photos/add-photo/error')\n else:\n form = AddPhotoForm()\n form.fields['gis_latitude'].widget.attrs['readonly'] = True\n form.fields['gis_longitude'].widget.attrs['readonly'] = True\n return render(request, 'photo/add_photo.html', {'form': form})\n\n\ndef remove_photo(request, slug):\n photo = Photo.objects.get(slug=slug)\n if request.user == photo.created_by:\n photo.delete()\n return HttpResponseRedirect('/users/%s' % request.user.username)\n\n\ndef complaint_at_photo(request, slug):\n if request.user.is_authenticated():\n photo = Photo.objects.get(slug=slug)\n user = User.objects.get(username=request.user)\n obj = ComplaintAtPhoto.objects.create(photo=photo, complaint_by=user)\n obj.save()\n return HttpResponseRedirect('/photos/')\n return HttpResponseRedirect('/photos/add-complaint/error')\n\n\n","sub_path":"photo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"84862751","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Case Specific Sorting of Strings\n# \n# ## Problem statement\n# Given a string consisting of uppercase and lowercase ASCII characters, write a function, `case_sort`, that sorts uppercase and lowercase letters separately, such that if the $i$th place in the original string had an uppercase character then it should not have a lowercase character after being sorted and vice versa.\n# \n# For example: \n# **Input:** fedRTSersUXJ \n# **Output:** deeJRSfrsTUX\n\n# In[6]:\n\n\ndef case_sort(string):\n \"\"\"\n Here are some pointers on how the function should work:\n 1. Sort the string\n 2. Create an empty output list\n 3. Iterate over original string\n if the character is lower-case:\n pick lower-case character from sorted string to place in output list\n else:\n pick upper-case character from sorted string to place in output list\n \n Note: You can use Python's inbuilt ord() function to find the ASCII value of a character\n \"\"\"\n up_chr_idx = 0\n lw_chr_idx = 0\n \n sorted_str = sorted(string)\n for idx,chr in enumerate(sorted_str):\n if 97 <= ord(chr)<=122:\n lw_chr_idx = idx\n break\n output = ''\n for ch in string:\n if 97 <=ord(ch) <=122:\n output+=sorted_str[lw_chr_idx]\n lw_chr_idx += 1\n else:\n output+=sorted_str[up_chr_idx]\n up_chr_idx += 1\n return output\n\n\n# \n\n# In[7]:\n\n\ndef test_function(test_case):\n test_string = test_case[0]\n solution = test_case[1]\n \n if case_sort(test_string) == solution:\n print(\"Pass\")\n else:\n print(\"False\")\n\n\n# In[8]:\n\n\ntest_string = 'fedRTSersUXJ'\nsolution = \"deeJRSfrsTUX\"\ntest_case = [test_string, solution]\ntest_function(test_case)\n\n\n# In[9]:\n\n\ntest_string = \"defRTSersUXI\"\nsolution = \"deeIRSfrsTUX\"\ntest_case = [test_string, solution]\ntest_function(test_case)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"basic algo/Case Specific Sorting of Strings.py","file_name":"Case Specific Sorting of 
Strings.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"426686724","text":"def conv_dec2bin_001(d):\n '''\n Практика 1, Слайд 12 , Задание 1\n Input: число в десятеричной системе счисления\u000B\n output: число в двоичной системе счисления\u000B\n 10 -> 2\n '''\n\n # словарь соответсвия чисел. Можно сделать для систем счисления свыще 16\n ref2 = {0:'0', 1:'1'}\n\n result = \"\"\n memd = d\n\n while d != 0:\n r = d % 2\n d = d // 2\n result += ref2[r]\n print (r, d, ref2[r])\n\n print(result)\n result = result[::-1]\n print(\"dec :\" + str(memd) + \" => bin \" + result)\n\n\ndef conv_bin2dec_002(n):\n '''\n Практика 1, Слайд 12 , Задание 2\n Input: число в двоичной системе счисления\u000B\n output: число в десятичной системе счисления\u000B\n 2 -> 10\n '''\n\n le = len(n)\n lst = list(n)\n pwr = []\n for x in lst:\n pwr.append(int(x))\n print(pwr)\n\n pos = 0\n result=0\n for x in pwr:\n pos += 1\n pwr[pos-1] = x*(2**(le-pos))\n result += pwr[pos-1]\n print(pwr)\n\n print(\"bin:\" + n + \" => dec:\" + str(result))\n\n\ndef conv_hex2dec_003(hexnum):\n '''\n Практика 1, Слайд 12 , Задание 3\n Input: число в шестнадцатеричной системе счисления\u000B\n output: число в десятичной системе счисления\u000B\n 16 -> 10\n '''\n ref16to10 = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,\n 'A': 10, 'B': 11, 'C' : 12, 'D' : 13, 'E': 14, 'F': 15}\n\n le = len(hexnum)\n lst = list(hexnum)\n print(lst)\n\n pwr = []\n pos = 0\n result=0\n for x16 in lst:\n pos += 1\n x10=ref16to10[x16]\n pwr.append(x10*(16**(le-pos)))\n result += pwr[pos - 1]\n print(pwr)\n print(result)\n\n return result\n\ndef conv_dec2hex_004(decnum):\n '''\n Практика 1, Слайд 12 , Задание 4\n 10 -> 16\n '''\n\n # словарь соответсвия чисел. 
Можно сделать для систем счисления свыще 16\n ref10to16 = {0:'0', 1:'1', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9',\n 10:'A', 11:'B', 12:'C', 13:'D', 14:'E', 15:'F'}\n\n base = 16\n result = \"\"\n memd = decnum\n\n while decnum != 0:\n r = decnum % base\n decnum = decnum // base\n result += ref10to16[r]\n print (r, decnum, ref10to16[r])\n\n print(result)\n result = result[::-1]\n print(\"dec :\" + str(memd) + \" => hex: \" + result)\n\n\n# 1\nd = int(input (\"dec->bin: Введите dec-число: \"))\nconv_dec2bin_001(d)\n\n# 2\nb = input (\"bin->dec: Введите bin-число: \")\nconv_bin2dec_002(b)\n\n# 3\nhexnum = input (\"hex->dec Введите hex-число: \")\nconv_hex2dec_003(hexnum)\n\n# 4\ndecnum = int(input (\"dec->hex Введите dec-число: \"))\nconv_dec2hex_004(decnum)\n\n\n","sub_path":"12_NumberSystems.py","file_name":"12_NumberSystems.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"580410862","text":"import json\r\nimport re\r\n\r\n\r\n# Parser for JSON string that consists of JSON array with JSON objects\r\ndef parse_json(lst):\r\n result = []\r\n structure = []\r\n stack = []\r\n pair = []\r\n for i, j in enumerate(lst):\r\n if '{' in j or '}' in j:\r\n for k in j:\r\n if k != '{' and k != '}':\r\n pass\r\n else:\r\n if k == '{':\r\n structure.append('{')\r\n elif k == '}' and structure[-1] == '{':\r\n structure.pop()\r\n dict_temp = dict(stack)\r\n result.append(dict_temp)\r\n stack = []\r\n else:\r\n print('Error in structure')\r\n\r\n elif ':' in j and ':' in lst[i-1]:\r\n text = re.sub(r'\\s+', ' ', j)\r\n pair.append(text)\r\n stack.append(pair)\r\n pair=[]\r\n elif ':' in j and ':' not in lst[i-1]:\r\n ex = j.strip()\r\n if len(ex)>1:\r\n pair.append(lst[i-1])\r\n l = ex.split()\r\n for k in l:\r\n if k == ':':\r\n pass\r\n else:\r\n text = re.sub('[^А-яA-z0-9]', '', k)\r\n if text.isdigit():\r\n pair.append(int(text))\r\n elif text == 'None':\r\n pair.append(None)\r\n else:\r\n text = re.sub(r'\\s+', ' ', text)\r\n pair.append(text)\r\n stack.append(pair)\r\n pair = []\r\n else:\r\n pair.append(lst[i-1])\r\n elif ':' in lst[i-1] and ':' not in lst[i-2]:\r\n if pair:\r\n text = re.sub(r'\\s+', ' ', j)\r\n pair.append(text)\r\n stack.append(pair)\r\n pair = []\r\n return result\r\n\r\n\r\n# Delete duplicates in the list of dictionaries\r\ndef del_duplicate(lst):\r\n seen = set()\r\n final_d = []\r\n for d in lst:\r\n t = tuple(d.items())\r\n if t not in seen:\r\n seen.add(t)\r\n final_d.append(d)\r\n return final_d\r\n\r\n\r\n# Dump for JSON array with Json objects. 
Works very slow with big list\r\ndef dump_json(lst):\r\n result = '['\r\n for _ in lst:\r\n items = tuple(_.items())\r\n s = ''\r\n for key, value in items:\r\n s += '\"{}\": \"{}\", '.format(key, value)\r\n result += '{' + s[:-2] + '}, '\r\n return result[:-2] + ']'\r\n\r\n\r\nwith open(\"C:/Users/ekant/Desktop/winedata_1.json\") as f:\r\n s_1 = f.read()\r\n s_1 = s_1.replace('\\\\\"', \"'\")\r\n # s_1 = s_1.encode().decode('unicode_escape')\r\n\r\nwith open(\"C:/Users/ekant/Desktop/winedata_2.json\") as inf:\r\n s_2 = inf.read()\r\n s_2 = s_2.replace('\\\\\"', \"'\")\r\n # s_2 = s_2.encode().decode('unicode_escape')\r\n\r\n\r\nstring = s_1+s_2\r\nstring = string.replace('null', 'None')\r\nd = string.split('\"')\r\n\r\nresult_parse = parse_json(d)\r\nnew_d = del_duplicate(result_parse)\r\nprint('Дубликатов:', len(result_parse)-len(new_d))\r\n\r\n# Sort objects by price, then by wine sort\r\n\r\nwinedata = sorted(new_d, key=lambda x: '' if x['variety'] is None else x['variety'])\r\nwinedata = sorted(winedata, key=lambda x: -1 * float('inf') if x['price'] is None else x['price'], reverse=True)\r\n\r\nresult_json = dump_json(winedata)\r\n\r\nf = open('winedata_full.json', 'tw', encoding='utf-8')\r\nf.write(result_json)\r\nf.close()\r\n","sub_path":"01-Data-Structures/hw/sticks/parser_dumper_json_part1_2.py","file_name":"parser_dumper_json_part1_2.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"137790757","text":"#!/usr/bin/env python2.7\n\nimport sys\nimport os\nimport jinja2\n\nfrom fabric.api import *\nfrom fabric.tasks import execute\nimport getpass\n\ntemplateLoader = jinja2.FileSystemLoader( searchpath=\"/\" )\ntemplateEnv = jinja2.Environment( loader=templateLoader )\nTEMPDOMFILE = os.getcwd()+'/jinja2temps/mszone.conf'\n\ntempdom = templateEnv.get_template( TEMPDOMFILE )\n\nTEMPLDOMFILE = os.getcwd()+'/jinja2temps/lmdom.conf'\n\ntempldom = templateEnv.get_template( TEMPLDOMFILE )\n\nenv.roledefs = {\n 'dns': [str(raw_input('Please enter NS1 IP address: ')), str(raw_input('Please enter NS2 IP address: '))]\n}\n\nenv.user = raw_input('Please enter username for UNIX/Linux server: ')\nenv.password = getpass.getpass()\n\nprint('1. Write domain name which you want to update: ')\nprint('2. For exit, write 2 and click Enter button: ')\nent = raw_input('Write your choose: ')\n\nfor server in env.roledefs['dns']:\n env.host_string = server\n with settings(\n hide('warnings', 'running', 'stdout', 'stderr'),\n warn_only=True\n ):\n osver = run('uname -s')\n lintype = run('cat /etc/redhat-release | awk \\'{ print $1 }\\'')\n ftype = run('uname -v | awk \\'{ print $2 }\\' | cut -f1 -d \\'.\\'')\n if osver == 'FreeBSD' and ftype >= 10:\n getfbindpack = run('which named')\n if getfbindpack == '/usr/local/sbin/named':\n def writemzone():\n if server == env.roledefs['dns'][0]:\n fzonename = run('cat /usr/local/etc/namedb/named.conf | grep '+ent+' | head -1 | awk \\'{ print $2 }\\' | sed \\'s/\"//g\\'')\n fzonefile = run('cat /usr/local/etc/namedb/named.conf | grep '+ent+' | tail -1 | awk \\'{ print $2 }\\' | sed \\'s/\"//g;s/;//g\\' | cut -f 7 -d\\'/\\' | cut -f1,2 -d \\'.\\'')\n if ent == fzonename and fzonefile == ent:\n print(\"This domain name really exists...\")\n print(\"\"\"Please choose record type which you want to create: \n A\n NS\n MX\n TXT\n SRV\n \"\"\")\n rec = raw_input('Write your choose record type and press the enter button: ')\n if 
rec == \"A\":\n recnameA = raw_input('Please enter A record name: ')\n ipforA = raw_input('Enter IP address for A record: ')\n print('evvel')\n run('ifconfig')\n print('sonra')\n #run('echo \"'+recnameA+' IN A '+ipforA+'\" >> /usr/local/etc/named/master/'+ent+'.zone')\n else:\n print(\"Entered domain name is not exits and you cannot add new record.\")\n print(\"Please use ./python-add-zone.py script for add new nomain.\")\n\n def writeszone():\n if server == env.roledefs['dns'][1]:\n run('service named restart')\n\n if ent != 2 and len(ent) > 4:\n writemzone()\n writeszone()\n else:\n print(\"\\nMinimal symbol count must be 4.\")\n sys.exit()\n elif osver == 'Linux' and lintype == 'CentOS':\n getlbindpack = run('which named')\n bindlpidfile = run('cat /var/run/named/named.pid')\n bindlpid = run('ps waux|grep named | grep -v grep | awk \\'{ print $2 }\\'')\n if getlbindpack == '/usr/sbin/named' and bindlpidfile == bindlpid:\n def writelmzone():\n if server == env.roledefs['dns'][0]:\n a = 5\n b = 6\n print(a+b)\n def writelszone():\n if server == env.roledefs['dns'][1]:\n a = 6 \n b = 7\n print(a+b)\n if ent != 2 and len(ent) > 4:\n writelmzone()\n writelszone()\n else:\n print(\"\\nMinimal symbol count must be 4.\")\n sys.exit()\n else:\n print(\"The script is not determine server type. For this reason you cannot use this script.\")\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"585124115","text":"#Write a Python program to combine two dictionary adding values for common keys. Go to the editor\n#d1 = {'a': 100, 'b': 200, 'c':300}\n#d2 = {'a': 300, 'b': 200, 'd':400}\n#Sample output: Counter({'a': 400, 'b': 400, 'd': 400, 'c': 300})\n\n\nd1 = {'a': 100, 'b': 200, 'c':300}\nd2 = {'a': 300, 'b': 200, 'd':400}\nd3= dict()\nfor i,j in d1.items():\n for k,l in d2.items():\n if i == k:\n sum=j+l\n d3[i]=sum\n else:\n d3[i]=j\n\nprint(d3)","sub_path":"venv/My programs/Exercises/Exercise14.py","file_name":"Exercise14.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"97904736","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\nimport json\nfrom urllib import urlencode\n\nfrom django.contrib.auth.models import Permission\nfrom django.core.urlresolvers import reverse\n\nfrom timepiece import models as timepiece\nfrom timepiece.tests.base import TimepieceDataTestCase\n\n\nclass TestProductivityReport(TimepieceDataTestCase):\n url_name = 'productivity_report'\n\n def setUp(self):\n self.username = 'test_user'\n self.password = 'password'\n self.user = self.create_user(username=self.username,\n password=self.password)\n self.permission = Permission.objects.get(codename='view_entry_summary')\n self.user.user_permissions.add(self.permission)\n self.client.login(username=self.username, password=self.password)\n\n self.project = self.create_project()\n self.users = []\n self.users.append(self.create_user(first_name='Person', last_name='1'))\n self.users.append(self.create_user(first_name='Person', last_name='2'))\n self.users.append(self.create_user(first_name='Person', last_name='3'))\n self.weeks = []\n self.weeks.append(datetime.datetime(2012, 9, 24))\n self.weeks.append(datetime.datetime(2012, 10, 1))\n self.weeks.append(datetime.datetime(2012, 10, 8))\n self.weeks.append(datetime.datetime(2012, 10, 15))\n\n self._create_entries()\n self._create_assignments()\n\n def _create_entries(self):\n for start_time in (self.weeks[1], self.weeks[3]):\n for user in (self.users[1], self.users[2]):\n end_time = start_time + relativedelta(hours=2)\n data = {'user': user, 'start_time': start_time,\n 'end_time': end_time, 'project': self.project}\n self.create_entry(data)\n\n def _create_assignments(self):\n for week_start in (self.weeks[0], self.weeks[1]):\n for user in (self.users[0], self.users[1]):\n data = {'user': user, 'week_start': week_start,\n 'project': self.project, 'hours': 2}\n self.create_project_hours_entry(**data)\n\n def _get(self, url_name=None, url_kwargs=None, get_kwargs=None, **kwargs):\n 
\"\"\"Convenience wrapper for test client GET request.\"\"\"\n url_name = url_name or self.url_name\n url = reverse(url_name, kwargs=url_kwargs)\n if get_kwargs:\n url += '?' + urlencode(get_kwargs)\n return self.client.get(url, **kwargs)\n\n def _unpack(self, response):\n form = response.context['form']\n report = json.loads(response.context['report'])\n organize_by = response.context['type']\n worked = response.context['total_worked']\n assigned = response.context['total_assigned']\n return form, report, organize_by, worked, assigned\n\n def _check_row(self, row, correct):\n self.assertEqual(len(row), 3)\n self.assertEqual(row[0], correct[0])\n self.assertEqual(float(row[1]), correct[1])\n self.assertEqual(float(row[2]), correct[2])\n\n def test_not_authenticated(self):\n \"\"\"User must be logged in to see this report.\"\"\"\n self.client.logout()\n response = self._get()\n self.assertEqual(response.status_code, 302) # Redirects to login\n\n def test_no_permission(self):\n \"\"\"This report requires permission to view entry summaries.\"\"\"\n self.user.user_permissions.remove(self.permission)\n response = self._get()\n self.assertEqual(response.status_code, 302) # Redirects to login\n\n def test_retrieval(self):\n \"\"\"No report data should be returned upon initial retrieval.\"\"\"\n response = self._get()\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 0)\n self.assertEqual(len(report), 0)\n self.assertEqual(organize_by, '')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_no_project(self):\n \"\"\"Form requires specification of project.\"\"\"\n data = {'organize_by': 'week'}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 1)\n self.assertTrue('project' in form.errors)\n 
self.assertEqual(len(report), 0)\n self.assertEqual(organize_by, '')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_invalid_project_id(self):\n \"\"\"Form requires specification of valid project.\"\"\"\n data = {'organize_by': 'week', 'project_1': 12345}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 1)\n self.assertTrue('project' in form.errors)\n self.assertEqual(len(report), 0)\n self.assertEqual(organize_by, '')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_no_organize_by(self):\n \"\"\"Form requires specification of organization method.\"\"\"\n data = {'project_1': self.project.pk}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 1)\n self.assertTrue('organize_by' in form.errors)\n self.assertEqual(len(report), 0)\n self.assertEqual(organize_by, '')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_invalid_organize_by(self):\n \"\"\"Form requires specification of valid organization method.\"\"\"\n data = {'project_1': self.project.pk, 'organize_by': 'invalid'}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 1)\n self.assertTrue('organize_by' in form.errors)\n self.assertEqual(len(report), 0)\n self.assertEqual(organize_by, '')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_no_data(self):\n \"\"\"If no data, report should contain header row only.\"\"\"\n timepiece.Entry.objects.filter(project=self.project).delete()\n 
timepiece.ProjectHours.objects.filter(project=self.project).delete()\n data = {'project_1': self.project.pk, 'organize_by': 'week'}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 0)\n self.assertEqual(len(report), 1)\n self.assertEqual(organize_by, 'week')\n self.assertEqual(float(worked), 0.0)\n self.assertEqual(float(assigned), 0.0)\n\n def test_organize_by_week(self):\n \"\"\"Report should contain hours per week on the project.\"\"\"\n data = {'project_1': self.project.pk, 'organize_by': 'week'}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 0)\n self.assertEqual(organize_by, 'week')\n self.assertEqual(float(worked), 8.0)\n self.assertEqual(float(assigned), 8.0)\n self.assertEqual(len(report), 1 + 4) # Include header row\n self._check_row(report[1], ['2012-09-24', 0.0, 4.0])\n self._check_row(report[2], ['2012-10-01', 4.0, 4.0])\n self._check_row(report[3], ['2012-10-08', 0.0, 0.0])\n self._check_row(report[4], ['2012-10-15', 4.0, 0.0])\n\n def test_organize_by_people(self):\n \"\"\"Report should contain hours per peron on the project.\"\"\"\n data = {'project_1': self.project.pk, 'organize_by': 'person'}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n form, report, organize_by, worked, assigned = self._unpack(response)\n self.assertEqual(len(form.errors), 0)\n self.assertEqual(organize_by, 'person')\n self.assertEqual(float(worked), 8.0)\n self.assertEqual(float(assigned), 8.0)\n self.assertEqual(len(report), 1 + 3) # Include header row\n self._check_row(report[1], ['Person 1', 0.0, 4.0])\n self._check_row(report[2], ['Person 2', 4.0, 4.0])\n self._check_row(report[3], ['Person 3', 4.0, 0.0])\n\n def test_export(self):\n \"\"\"Data should be exported 
in CSV format.\"\"\"\n data = {'project_1': self.project.pk, 'organize_by': 'week',\n 'export': True}\n response = self._get(data=data)\n self.assertEqual(response.status_code, 200)\n data = dict(response.items())\n self.assertEqual(data['Content-Type'], 'text/csv')\n disposition = 'attachment; filename={0}_productivity.csv'.format(\n self.project.name)\n self.assertTrue(data['Content-Disposition'].startswith(disposition))\n report = response.content.splitlines()\n self.assertEqual(len(report), 1 + 4) # Include header row\n self._check_row(report[1].split(','), ['2012-09-24', 0.0, 4.0])\n self._check_row(report[2].split(','), ['2012-10-01', 4.0, 4.0])\n self._check_row(report[3].split(','), ['2012-10-08', 0.0, 0.0])\n self._check_row(report[4].split(','), ['2012-10-15', 4.0, 0.0])\n","sub_path":"timepiece/tests/reports/productivity.py","file_name":"productivity.py","file_ext":"py","file_size_in_byte":9716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"277926503","text":"import random\nimport math\n\nclass question5():\n\tdef mutation(self, X: str, chi: float, rep: int):\n\t\tmu = chi / len(X)\n\t\tfor i in range(0, rep):\n\t\t\tX = list(X)\n\t\t\tfor i in range(0, len(X)):\n\t\t\t\ttemp = random.random()\n\t\t\t\tif temp < mu:\n\t\t\t\t\tX[i] = str(int(math.fabs(int(X[i]) - 1)))\n\t\t\tX = ''.join(X)\n\t\treturn (X)\n\n\tdef crossover(self, X: str, Y: str, rep: int):\n\t\tfor i in range(0, rep):\n\t\t\ttemp = [0] * len(X)\n\t\t\tfor i in range(0, len(X)):\n\t\t\t\tp = random.random()\n\t\t\t\ttemp[i] = X[i] if p > 0.5 else Y[i]\n\t\t\ttemp = ''.join(temp)\n\t\treturn temp\n\n\tdef oneMAX(self, X: str):\n\t\tevaluate = sum([int(i) for i in X])\n\t\treturn evaluate\n\n\tdef tournament(self, X: list, k: int, rep: int):\n\t\tX = X\n\t\tmu = 1 / len(X)\n\t\t# mu = 0.9\n\t\tif k > len(X):\n\t\t\treturn False\n\t\telse:\n\t\t\tfor i in range(rep):\n\t\t\t\tx = []\n\t\t\t\tans = [0] * k\n\t\t\t\tm = len(X)\n\t\t\t\tfor i in range(0, m):\n\t\t\t\t\tx.append(self.oneMAX(X[i]))\n\t\t\t\tOut = list(zip(x, X))\n\t\t\t\tx = sorted(Out, key=lambda x: x[0], reverse=True)\n\t\t\t\ti = 0\n\t\t\t\twhile 0 in ans:\n\t\t\t\t\tp = random.random()\n\t\t\t\t\ta = b = 0\n\t\t\t\t\tfor j in range(0, len(X)):\n\t\t\t\t\t\ta = a + bool(j) * mu * (1 - mu) ** (j - 1)\n\t\t\t\t\t\tb = b + mu * (1 - mu) ** (j)\n\t\t\t\t\t\tif a < p <= b:\n\t\t\t\t\t\t\tm = x[j][1]\n\t\t\t\t\t\t\tans[i] = (str(x[j][1]))\n\t\t\t\t\t\t\ti = i + 1\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\treturn ans, X\n\n\tdef create(self, n, pop):\n\t\tX = [0] * n\n\t\tfor i in range(0, n):\n\t\t\ttemp = []\n\t\t\tfor j in range(0, pop):\n\t\t\t\tp = random.random()\n\t\t\t\ttemp.append('1' if p > 0.5 else '0')\n\t\t\tX[i] = ''.join(temp)\n\t\treturn X\n\n\tdef Genetic(self, chi, n, Lambda, k, Rep):\n\t\trep = Rep\n\t\tflag = 0\n\t\tX = self.create(n, Lambda)\n\t\tX1, X = self.tournament(X, k, rep)\n\t\tX1[0] = self.crossover(X1[0], 
X1[1], rep)\n\t\tX1[1] = self.crossover(X1[0], X1[1], rep)\n\t\tX1[0] = self.mutation(X1[0], chi, rep)\n\t\tX1[1] = self.mutation(X1[1], chi, rep)\n\t\tX.extend(X1)\n\t\tX_eva = []\n\t\tfor i in range(len(X)):\n\t\t\tX_eva.append(self.oneMAX(X[i]))\n\t\tOut = list(zip(X, X_eva))\n\t\tOut.sort(key=lambda x: x[0], reverse=True)\n\t\tans = Out[0]\n\t\tif ans[1] == n:\n\t\t\treturn n, chi, Lambda, k, rep, ans[1], ans[0]\n\t\telse:\n\t\t\twhile not int(ans[1]) == n:\n\t\t\t\tif rep <= 20000:\n\t\t\t\t\trep = rep + 1\n\t\t\t\t\tX1, X = self.tournament(X, k, 1)\n\t\t\t\t\tX1[0] = self.crossover(X1[0], X1[1], 1)\n\t\t\t\t\tX1[1] = self.crossover(X1[0], X1[1], 1)\n\t\t\t\t\tX1[0] = self.mutation(X1[0], chi, 1)\n\t\t\t\t\tX1[1] = self.mutation(X1[1], chi, 1)\n\t\t\t\t\tX.extend(X1)\n\t\t\t\t\tX_eva = []\n\t\t\t\t\tfor i in range(len(X)):\n\t\t\t\t\t\tX_eva.append(self.oneMAX(X[i]))\n\t\t\t\t\tOut = list(zip(X, X_eva))\n\t\t\t\t\tOut.sort(key=lambda x: x[1], reverse=True)\n\t\t\t\t# print(Out)\n\t\t\t\t\tans = Out[0]\n\t\t\t\t\tprint(ans[1],rep)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\t\treturn 'cirlce limits'\n\t\t\t# print(ans)\n\t\t\treturn n, chi, Lambda, k, rep, ans[1], ans[0]\n\n\napp = question5()\n# chi = float(input('please input chi: '))\n# n = int(input('please input n: '))\n# Lambda = int(input('please input population: '))\n# k = int(input('please input k: '))\n# rep = int(input('please input rep: '))\nX = app.create(200, 100)\n# print(X)\nY = app.mutation(X[1], 0.2, 4)\n# print(Y)\nZ = app.crossover(X[3], X[4], 4)\n# print(Z)\nSigma = app.tournament(X, 2, 4)\n# print(Sigma)\nchi = 2\n\nprint(app.Genetic(chi, n=10, Lambda=10, k=2, Rep=4))\n","sub_path":"question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"465518907","text":"import logging\n\nfrom hca_ingest.api.ingestapi import IngestApi\n\nfrom manifest.generator import ManifestGenerator\n\n\nclass ManifestExporter:\n def __init__(self, ingest_api: IngestApi, manifest_generator: ManifestGenerator):\n self.logger = logging.getLogger('ManifestExporter')\n self.ingest_api = ingest_api\n self.manifest_generator = manifest_generator\n\n def export(self, process_uuid: str, submission_uuid: str):\n assay_manifest = self.manifest_generator.generate_manifest(process_uuid, submission_uuid)\n assay_manifest_resource = self.ingest_api.create_bundle_manifest(assay_manifest)\n assay_manifest_url = assay_manifest_resource['_links']['self']['href']\n self.logger.info(f\"Assay manifest was created: {assay_manifest_url}\")\n","sub_path":"manifest/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"362822125","text":"from d3m import container\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\nfrom d3m.metadata import hyperparams\nfrom dsbox.datapreprocessing.cleaner import config\nfrom d3m.primitive_interfaces.base import CallResult\nimport common_primitives.utils as common_utils\nimport d3m.metadata.base as mbase\nimport warnings\n\n__all__ = ('Unfold',)\n\nInputs = container.DataFrame\nOutputs = container.DataFrame\n\n\nclass UnfoldHyperparams(hyperparams.Hyperparams):\n unfold_semantic_types = hyperparams.Set(\n elements=hyperparams.Hyperparameter[str](\"str\"),\n default=[\"https://metadata.datadrivendiscovery.org/types/PredictedTarget\"],\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\n \"\"\"\n A set of semantic types that the primitive will unfold.\n Only 'https://metadata.datadrivendiscovery.org/types/PredictedTarget' by default.\n \"\"\",\n )\n use_pipeline_id_semantic_type = hyperparams.UniformBool(\n default=False,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\n \"\"\"\n Controls whether semantic_type will be used for finding pipeline id column in input dataframe.\n If true, it will look for 'https://metadata.datadrivendiscovery.org/types/PipelineId' for pipeline id column,\n and create attribute columns using header: attribute_{pipeline_id}. \n eg. 'binaryClass_{a3180751-33aa-4790-9e70-c79672ce1278}'\n If false, create attribute columns using header: attribute_{0,1,2,...}.\n eg. 
'binaryClass_0', 'binaryClass_1'\n \"\"\",\n )\n\n\nclass Unfold(TransformerPrimitiveBase[Inputs, Outputs, UnfoldHyperparams]):\n \"\"\"\n A primitive which concat a list of dataframe to a single dataframe vertically\n \"\"\"\n\n __author__ = 'USC ISI'\n metadata = hyperparams.base.PrimitiveMetadata({\n \"id\": \"dsbox-unfold\",\n \"version\": config.VERSION,\n \"name\": \"DSBox unfold\",\n \"description\": \"A primitive which unfold a vertically concatenated dataframe\",\n \"python_path\": \"d3m.primitives.data_preprocessing.Unfold.DSBOX\",\n \"primitive_family\": \"DATA_PREPROCESSING\",\n \"algorithm_types\": [\"DATA_CONVERSION\"],\n \"source\": {\n \"name\": config.D3M_PERFORMER_TEAM,\n \"contact\": config.D3M_CONTACT,\n \"uris\": [config.REPOSITORY]\n },\n \"keywords\": [\"unfold\"],\n \"installation\": [config.INSTALLATION],\n })\n\n def __init__(self, *, hyperparams: UnfoldHyperparams) -> None:\n super().__init__(hyperparams=hyperparams)\n self.hyperparams = hyperparams\n self._sorted_pipe_ids = None\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n primary_key_cols = common_utils.list_columns_with_semantic_types(\n metadata=inputs.metadata,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/PrimaryKey\"]\n )\n\n unfold_cols = common_utils.list_columns_with_semantic_types(\n metadata=inputs.metadata,\n semantic_types=self.hyperparams[\"unfold_semantic_types\"]\n )\n\n if not primary_key_cols:\n warnings.warn(\"Did not find primary key column for grouping. Will not unfold\")\n return CallResult(inputs)\n\n if not unfold_cols:\n warnings.warn(\"Did not find any column to unfold. 
Will not unfold\")\n return CallResult(inputs)\n\n primary_key_col_names = [inputs.columns[pos] for pos in primary_key_cols]\n unfold_col_names = [inputs.columns[pos] for pos in unfold_cols]\n\n if self.hyperparams[\"use_pipeline_id_semantic_type\"]:\n pipeline_id_cols = common_utils.list_columns_with_semantic_types(\n metadata=inputs.metadata,\n semantic_types=[\"https://metadata.datadrivendiscovery.org/types/PipelineId\"]\n )\n\n if len(pipeline_id_cols) >= 2:\n warnings.warn(\"Multiple pipeline id columns found. Will use first.\")\n\n if pipeline_id_cols:\n inputs = inputs.sort_values(primary_key_col_names + [inputs.columns[pos] for pos in pipeline_id_cols])\n self._sorted_pipe_ids = sorted(inputs.iloc[:, pipeline_id_cols[0]].unique())\n else:\n warnings.warn(\n \"No pipeline id column found by 'https://metadata.datadrivendiscovery.org/types/PipelineId'\")\n\n new_df = self._get_new_df(inputs=inputs, use_cols=primary_key_cols + unfold_cols)\n\n groupby_df = inputs.groupby(primary_key_col_names)[unfold_col_names].aggregate(\n lambda x: container.List(x)).reset_index(drop=False)\n\n ret_df = container.DataFrame(groupby_df)\n ret_df.metadata = new_df.metadata\n ret_df = self._update_metadata_dimension(df=ret_df)\n\n split_col_names = [inputs.columns[pos] for pos in unfold_cols]\n\n ret_df = self._split_aggregated(df=ret_df, split_col_names=split_col_names)\n ret_df = common_utils.remove_columns(\n inputs=ret_df,\n column_indices=[ret_df.columns.get_loc(name) for name in split_col_names]\n )\n\n return CallResult(ret_df)\n\n @staticmethod\n def _get_new_df(inputs: container.DataFrame, use_cols: list):\n metadata = common_utils.select_columns_metadata(inputs_metadata=inputs.metadata, columns=use_cols)\n new_df = inputs.iloc[:, use_cols]\n new_df.metadata = metadata\n return new_df\n\n @staticmethod\n def _update_metadata_dimension(df: container.DataFrame) -> container.DataFrame:\n old_metadata = dict(df.metadata.query(()))\n old_metadata[\"dimension\"] = 
dict(old_metadata[\"dimension\"])\n old_metadata[\"dimension\"][\"length\"] = df.shape[0]\n df.metadata = df.metadata.update((), old_metadata)\n return df\n\n def _split_aggregated(self, df: container.DataFrame, split_col_names: list) -> container.DataFrame:\n lengths = [len(df.loc[0, col_name]) for col_name in split_col_names]\n\n for idx, col_name in enumerate(split_col_names):\n if self._sorted_pipe_ids:\n if len(self._sorted_pipe_ids) == lengths[idx]:\n extend_col_names = [\"{}_{}\".format(col_name, i) for i in self._sorted_pipe_ids]\n else:\n raise ValueError(\"Unique number of pipeline ids not equal to the number of aggregated values\")\n else:\n extend_col_names = [\"{}_{}\".format(col_name, i) for i in range(lengths[idx])]\n\n extends = container.DataFrame(df.loc[:, col_name].values.tolist(), columns=extend_col_names)\n\n df = common_utils.horizontal_concat(left=df, right=extends)\n origin_metadata = dict(df.metadata.query((mbase.ALL_ELEMENTS, df.columns.get_loc(col_name))))\n\n for name in extend_col_names:\n col_idx = df.columns.get_loc(name)\n origin_metadata[\"name\"] = name\n df.metadata = df.metadata.update((mbase.ALL_ELEMENTS, col_idx), origin_metadata)\n\n return df\n","sub_path":"dsbox/datapostprocessing/unfold.py","file_name":"unfold.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"442333828","text":"# -*- coding: utf-8 -*-\nimport logging\nimport requests\nimport json\nimport uuid\nimport re\nimport datetime\nimport time\nimport os\nimport smtplib\nfrom email.mime.text import MIMEText\n\ndef send_email(msg):\n mail_host = 'smtp.qq.com' \n mail_user = '734962820' \n mail_pass = 'QQ邮箱授权码' \n sender = '734962820@qq.com' \n receivers = ['734962820@qq.com'] \n message = MIMEText(msg,'html','utf-8')\n message['Subject'] = '云战役自动签到' \n message['From'] = sender \n message['To'] = receivers[0]\n try:\n smtpObj = smtplib.SMTP_SSL(mail_host)\n smtpObj.login(mail_user,mail_pass) \n smtpObj.sendmail(sender,receivers,message.as_string()) \n smtpObj.quit() \n print('send email success')\n except smtplib.SMTPException as e:\n print('send email error',e)\n\ndef run():\n header = {'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'}\n users = '''\n [\n {\n \"name\": \"111111\",\n \"psswd\": \"111111\",\n \"locationInfo\": \"浙江省杭州市金沙港生活区\"\n },\n {\n \"name\": \"222222\",\n \"psswd\": \"222222\",\n \"locationInfo\": \"浙江省杭州市金沙港生活区\"\n }\n ]\n '''\n users = json.loads(users)\n print('当前时间:', datetime.datetime.now())\n\n user_error_flag = False\n result_error_flag = False\n for user in users:\n s = requests.session()\n s.post('https://nco.zjgsu.edu.cn/login', data=user, headers=header)\n res = s.get('https://nco.zjgsu.edu.cn/', headers=header)\n content = str(res.content, encoding='utf-8')\n if re.search('当天已报送!', content):\n print(datetime.datetime.now().strftime('%Y-%m-%d'), '报送情况: *主动报送*')\n continue\n data = {}\n try:\n for item in re.findall(R'', content):\n key = re.search(R'name=\"(.+?)\"', item).group(1)\n value = re.search(R'value=\"(.*?)\"', item).group(1)\n check = re.search(R'checked', item)\n if key not in data.keys():\n data[key] = value\n elif check is not None:\n data[key] = value\n except:\n print('出现错误,可能是账号密码不正确')\n 
user_error_flag = True\n continue\n for item in re.findall(R'', content):\n key = re.search(R'name=\"(.+?)\"', item).group(1)\n data[key] = ''\n # 为了安全起见,这里还是推荐加上大致的地址和uuid值,虽然经过测试,不填写也可以正常使用\n # ---------------安全线-------------#\n data['uuid'] = str(uuid.uuid1())\n if('locationInfo' not in data):\n data['locationInfo'] = '浙江省杭州市浙江工商大学金沙港生活区'\n # ---------------安全线-------------#\n res = s.post('https://nco.zjgsu.edu.cn/', data=data, headers=header)\n success_flag = re.search('报送成功', str(res.content, encoding='utf-8')) is not None\n print(datetime.datetime.now().strftime('%Y-%m-%d'), '报送情况:', '报送成功' if success_flag\n else '报送失败!!!!!')\n if(not success_flag):\n result_error_flag = True\n if(user_error_flag):\n send_email('存在用户信息错误导致的签到失败')\n elif(result_error_flag):\n send_email('签到网站错误,或签到入口已关闭')\n \ndef handler(event, context): #主要云函数入口名称!\n return run()\n","sub_path":"Ali_tencent_Event.py","file_name":"Ali_tencent_Event.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"307765532","text":"vowels=['a','e','i','o','u','A','E','I','O','U']\nfinalVowel = []\ni=input('enter a string')\ndef vowelfilter():\n for letter in i:\n if letter in vowels:\n finalVowel.append(letter)\n return finalVowel\nk=vowelfilter()\nprint(k)\n","sub_path":"project1/functions/filtervowel.py","file_name":"filtervowel.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"201351101","text":"import requests\nimport os\nimport pandas as pd\nfrom datetime import datetime\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n# URL = 'https://rsr.akvo.org/rest/v1/'\nURL = 'http://192.168.1.134/rest/v1/'\n#PROJECT_ID = '7283'\nRSR_TOKEN = os.environ['RSR_TOKEN']\nFMT = '/?format=json&limit=1'\nFMT100 = '/?format=json&limit=100'\n\nheaders = {\n 'content-type': 'application/json',\n 'Authorization': RSR_TOKEN\n}\n\ndef get_response(endpoint, param, value):\n uri = '{}{}{}&{}={}'.format(URL, endpoint, FMT100, param, value)\n print(get_time() + ' Fetching - ' + uri)\n data = requests.get(uri, headers=headers)\n data = data.json()\n return data\n\ndef get_time():\n now = datetime.now().time().strftime(\"%H:%M:%S\")\n return now\n\ndef get_children_id(x):\n for k,v in x.items():\n return k\n\ndef get_children_title(x):\n for k,v in x.items():\n return v\n\n@app.route('/api/', methods=['GET'])\ndef api(project_id):\n results_indicator = get_response('results_framework','project',project_id)['results']\n indicators = [{'id':x['id'],'title':x['title']} for x in results_indicator]\n results_indicator_df = pd.DataFrame(results_indicator)\n results_indicator_df['child_id'] = results_indicator_df['child_projects'].apply(get_children_id)\n results_indicator_df['child_title'] = results_indicator_df['child_projects'].apply(get_children_title)\n results_indicator = results_indicator_df.to_dict('records')\n indicator_periods = []\n for result in results_indicator:\n for indicator in result['indicators']:\n for period in indicator['periods']:\n period.update({'project_id':result['project']})\n period.update({'project_name':result['project_title']})\n period.update({'project_title':result['title']})\n period.update({'parent_project':result['parent_project']})\n period.update({'parent_result':result['parent_result']})\n period.update({'child_project_id':result['child_id']})\n period.update({'child_project_title':result['child_title']})\n 
period.update({'dimensions':indicator['dimension_names']})\n indicator_periods.append(period)\n indicator_periods = pd.DataFrame(indicator_periods)\n indicator_periods[['period_end_year','period_end_month','period_end_date']] = indicator_periods['period_end'].str.split('-', expand=True)\n indicator_periods[['period_start_year','period_start_month','period_start_date']] = indicator_periods['period_start'].str.split('-', expand=True)\n period_start = indicator_periods.groupby('period_start').size().to_frame('size').reset_index()\n period_end = indicator_periods.groupby('period_end').size().to_frame('size').reset_index()\n period_start.rename(columns={'period_start_year':'start_year'})\n period_end.rename(columns={'period_end_year':'end_year'})\n period_start = period_start.to_dict('records')\n period_end = period_end.to_dict('records')\n indicator_periods = indicator_periods.to_dict('records')\n api = {\n 'dd_indicators': indicators,\n 'dd_start': period_start,\n 'dd_end': period_end,\n 'dd_region': ['Malawi', 'Zambia', 'Mozambique'],\n 'indicator_periods': indicator_periods,\n }\n return jsonify(api)\n\nif __name__ == '__main__':\n app.run(host= '0.0.0.0',debug=True, port=5000)\n","sub_path":"sites/appsa-api/test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"198627269","text":"\nimport math as m\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport csv\n\n\ndef spotRates_to_DF(rates, T, feq = -1):\n \"\"\" This function converts the spot rates to discount factors\n \n :param rates: spot rates\n :param T: vector that contains the time indices\n :param feq: compounding frequencies\n \"\"\"\n \n DF = []\n if feq == -1:\n for r, t in zip(rates, T):\n DF.append(m.exp(-r*t))\n else:\n for r, t in zip(rates, T):\n DF.append(m.pow(1+r/feq, -feq*t))\n return DF\n\ndef DF_to_Rates(DF, T, feq=-1):\n \"\"\" This function converts the discount factors to \n \n :param rates: spot rates\n :param T: vector that contains the time indices\n :param feq: compounding frequencies\n \"\"\"\n rates = []\n if feq == -1: ##CONTINUOUS COMPONDING RATES\n for t, df in zip(T, DF):\n rates.append(-m.log(df) / t)\n else: ##DISCRETE COMPOUNDING RATES\n for t, df in zip(T, DF):\n rates.append((pow(df, - 1/(t*feq))-1)*feq)\n return rates\n\nclass YieldCurve(object):\n '''\n The Yield Curve class that stores the the discount factor / time to maturity \n that facilitates automatic rates conversions, rates interpolation etc.\n '''\n\n def __init__(self):\n self.DF = []\n self.T = []\n \n def setCurve(self, T, DF):\n '''\n Initialize the yield curve with time to maturity and discount rates\n \n :param T: Time to maturity\n :param DF: Discount Rate\n \n .. 
warning:: must make sure that T and DF must have the same length\n '''\n self.DF = DF\n self.T = T\n \n def plotCurve(self,feq = -1):\n '''\n Plot the Curve.\n \n Note the plt.show() is needed in the main code to show the plot\n '''\n spotRates = self.getSpotRates(feq)\n plt.plot(self.T,spotRates, label ='Spot Rates')\n\n def getDiscountFactor(self):\n '''\n Returns discount factor\n \n :returns: Discount factor of the yield curve\n '''\n return self.DF\n \n def getMaturityDate(self):\n '''\n Returns the maturity date of yield curve\n \n :returns: Maturity date\n '''\n return self.T\n \n def getSpotRates(self, feq=-1):\n '''\n Compute the spot rates with respect to certain compounding frequencies, default is -1, which is corresponding to continuous compounding.\n \n :param feq: compounding frequencies, feq = -1 indicates continous compounding\n :returns: Spot rates of certain compounding frequencies\n '''\n \n rates = []\n if feq == -1: ##CONTINUOUS COMPONDING RATES\n for t, df in zip(self.T, self.DF):\n rates.append(-m.log(df) / t)\n else: ##DISCRETE COMPOUNDING RATES\n for t, df in zip(self.T, self.DF):\n rates.append((pow(df, - 1.0/(t*feq))-1)*feq)\n return rates\n\n def exportSpotRates(self, exportFileName, feq=-1):\n '''\n Export the spot rates of certain compounding frequencies to a csv file\n \n :param feq: compounding frequencies\n :param exportFileName: path and the name of the file to be exported\n '''\n rates = self.getSpotRates(feq);\n with open(exportFileName, 'wb') as f:\n writer = csv.writer(f, delimiter = ',')\n writer.writerow(['Maturity', 'Spot Rates'])\n for t, r in zip(self.T, rates):\n writer.writerow([t,r])\n \n def getForwardRates_PeriodByPeriod(self, feq=-1):\n '''\n Compute the forward rates\n '''\n if feq == -1:\n forwardRates = [-m.log(self.DF[0]) / self.T[0]]\n for i in range(len(self.DF)-1):\n dT = self.T[i+1] - self.T[i]\n forwardRates.append(-m.log(self.DF[i+1]/self.DF[i]) / dT)\n else:\n forwardRates = [(pow(self.DF[0], -1/self.T[0] 
/ feq) - 1) * feq]\n for i in range(len(self.DF) -1):\n dT = self.T[i+1] - self.T[i]\n forwardRates.append((pow(self.DF[i+1]/self.DF[i], -1 / dT /feq) - 1) * feq)\n return forwardRates\n \n def getForwardRates(self,startT, endT, feq = -1):\n\n startT, startDF = np.asarray(self.getInterpolatedDF(startT, feq))\n endT, endDF = np.asarray(self.getInterpolatedDF(endT, feq))\n forwardDF = endDF / startDF\n \n return DF_to_Rates(forwardDF, endT - startT, feq)\n \n def getParYield(self,startT = 0):\n '''\n T_i = np.arange(0.25,self.T[-1],0.25)\n T_i, DF_i = self.getInterpolatedDF(T_i, 2)\n DF_i = np.asarray(DF_i) \n f = lambda i: 2*(1-DF_i[i]) / np.sum(DF_i[i::-2])\n return T_i, map(f,range(len(T_i)))\n '''\n T_i = np.arange(startT + 0.5, self.T[-1]+0.5, 0.5)\n T_i, DF_i = self.getInterpolatedDF(T_i, 2)\n \n if startT == 0:\n DF_start = [1];\n else:\n startT, DF_start = self.getInterpolatedDF([startT], 2)\n \n FDF_i = np.asarray(DF_i) / DF_start[0]\n f = lambda i: 2*(1-FDF_i[i]) / np.sum(FDF_i[i::-1])\n return T_i, map(f, range(len(T_i)))\n \n def getInterpolatedRates(self, T_int, feq = -1):\n '''\n Assume that the interpolate points T_int are in increasing order\n '''\n rates = self.getSpotRates(feq) ## Get the current rates\n newT = []\n newrates = []\n i = 1;\n for t in T_int:\n if t < self.T[0]:\n continue\n while i < len(self.T) and not(self.T[i-1] <= t < self.T[i]):\n i += 1\n if i == len(self.T):\n if self.T[i-1] == t:\n newrates.append(rates[i-1])\n newT.append(t)\n break\n range_int = self.T[i] - self.T[i-1]\n new_r = (rates[i] * (t - self.T[i-1]) + rates[i-1] * (self.T[i] - t)) / range_int\n newT.append(t)\n newrates.append(new_r)\n return newT, newrates \n \n def getInterpolatedDF(self, T_int, feq = -1):\n newT, newrates = self.getInterpolatedRates(T_int, feq)\n newDF = spotRates_to_DF(newrates, newT, feq)\n return newT, newDF\n \n \n \n 
","sub_path":"FixedIncome_Packages/yieldCurve.py","file_name":"yieldCurve.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"131157696","text":"class Solution:\n # @return a list of lists of length 3, [[val1,val2,val3]]\n def threeSum(self, num):\n num.sort()\n n = len(num)\n\n d = {}\n\n for i, v in enumerate(num):\n d[v] = i\n\n ans = set()\n for i in range(0, n - 2):\n for j in range(i + 1, n - 1):\n if num[i] + num[j] + num[j + 1] > 0:\n break\n\n if - (num[i] + num[j]) in d:\n idx = d[-num[i] - num[j]]\n ans.add((num[i], num[j], num[idx]))\n\n return [ list(a) for a in ans ]","sub_path":"3Sum/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"479692464","text":"\"\"\"Author - Maksim Sapunov, msdir6199@gmail.com 15/01/2021\"\"\"\n\n# Задача #6... реализовать два небольших скрипта:\n# Подзадача #2. Итераторб повторяющий элементы некоторого списка, определенного заранее\n\nfrom random import randint\nfrom itertools import cycle\nfrom sys import argv\n\nscript_name, len_of_list = argv\n# Случайное заполнение списка с помощью генератора\nlist_random = [randint(1, 100) for el in range(int(len_of_list))]\n# Определение условия выхода из цикла\nflag = 0\n# Счетчик для визуального оформления вывода\ndivider = 0\n\nfor el in cycle(list_random):\n flag += 1\n if flag > 50:\n break\n else:\n if divider == len(list_random):\n print('\\n--------------')\n divider = 0\n print(el, end=' ')\n divider += 1\n","sub_path":"Python_basic/Lesson_4/L4_Task_6_2.py","file_name":"L4_Task_6_2.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"356963067","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# Experiment Setup\nnp.random.seed(19680801)\nnum_actions = 10\nnum_trials = 2000\nnum_iter = 10000\nepsilon_values = [1e0, 1e-1, 1e-2, 1e-3]\nepsilons = np.array(epsilon_values * num_trials)\nnum_samples = len(epsilons)\n\nq_star_a = np.repeat(np.random.normal(size=[num_actions, num_trials]), len(epsilon_values), axis=1)\noptimal_action = np.argmax(q_star_a, axis=0)\noptimal_actions = np.zeros([num_iter, num_samples], dtype=np.int32)\nR_t_a = np.zeros([num_iter, num_actions, num_samples])\nQ_a = np.zeros([num_actions, num_samples])\nK_a = np.zeros([num_actions, num_samples], dtype=np.int32)\n\n# The first action is always assumed to be the action at index 0\n# Absent prior knowledge, this is equivalent to a random choice\n\nfor t in range(1, num_iter):\n # Select Action\n is_greedy = np.random.random(num_samples) < (1 - epsilons)\n greedy_actions = np.argmax(Q_a, axis=0)\n random_actions = np.random.randint(num_actions, size=num_samples)\n actions = np.where(is_greedy, greedy_actions, random_actions)\n action_idx = actions, np.arange(num_samples)\n optimal_actions[t, actions == optimal_action] += 1\n\n # Sample Environment\n noise_term = np.random.normal(scale=1., size=num_samples)\n R_t_a[t][action_idx] = q_star_a[action_idx] + noise_term\n\n # Update Estimate\n K_a[action_idx] += 1\n step_size = 1 / K_a[action_idx]\n target = R_t_a[t][action_idx]\n old_estimate = Q_a[action_idx]\n\n Q_a[action_idx] = old_estimate + step_size * (target - old_estimate)\n\n\nR_t = np.mean(np.sum(R_t_a, axis=1).reshape([num_iter, num_trials, -1]), axis=1)\nplt.subplot(211)\nfor eps in range(len(epsilon_values)):\n plt.plot(R_t[:, eps], label=\"eps = %f\" % epsilon_values[eps])\nplt.xlabel('Steps')\nplt.ylabel('Average reward')\nplt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0.)\nplt.subplot(212)\nplt.plot(np.mean(optimal_actions.reshape([num_iter, 
num_trials, -1]), axis=1))\nplt.xlabel('Steps')\nplt.ylabel('Optimal action')\nplt.show()\n","sub_path":"bandit.py","file_name":"bandit.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"239763278","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Documentation source:\n# - https://gbdev.gg8.se/wiki/articles/Sound_Controller\n\nfrom vsgb.audio.abstract_sound_channel import AbstractSoundChannel\nfrom vsgb.audio.volume_envelope import VolumeEnvelope\nfrom vsgb.io_registers import IO_Registers\n\nclass SoundChannel2(AbstractSoundChannel):\n\n def __init__(self, cgb_mode):\n super().__init__(IO_Registers.NR_21 - 1, 64, cgb_mode)\n self.freq_divider = 0\n self.volume_envelope = VolumeEnvelope()\n\n def start(self):\n self.i = 0\n if self.cgb_mode:\n self.length.reset()\n self.length.start()\n self.volume_envelope.start()\n\n def trigger(self):\n self.i = 0\n self.freq_divider = 1\n self.volume_envelope.trigger()\n\n def step(self, ticks):\n self.volume_envelope.step(ticks)\n e = self.update_length(ticks) and self.dac_enabled\n if not e:\n return 0\n self.freq_divider -= 1\n if self.freq_divider == 0:\n self.reset_freq_divider()\n self.last_output = (self.get_duty() & (1 >> self.i)) >> self.i\n self.i = (self.i + ticks) % 8\n return self.last_output * self.volume_envelope.get_volume()\n\n def set_nr0(self, value):\n super().set_nr0(value)\n\n def set_nr1(self, value):\n super().set_nr1(value)\n self.length.set_length(64 - (value & 0b00111111))\n\n def set_nr2(self, value):\n super().set_nr2(value)\n self.volume_envelope.set_nr2(value)\n self.dac_enabled = (value & 0b11111000) != 0\n\n\n def get_duty(self):\n return {\n 0: 0b00000001,\n 1: 0b10000001,\n 2: 0b10000111,\n 3: 0b01111110\n }.get(self.get_nr1() >> 6)\n\n def reset_freq_divider(self):\n self.freq_divider = self.get_frequency() * 4\n ","sub_path":"vsgb/audio/sound_channel2.py","file_name":"sound_channel2.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"147540212","text":"from __future__ import print_function\n\nimport functools\nimport json\nimport os\nimport re\nimport requests\n\nfrom ngi_pipeline.log.loggers import minimal_logger\nfrom ngi_pipeline.utils.classes import memoized\n\n# Need a better way to log\nLOG = minimal_logger(__name__)\n\n\ntry:\n CHARON_API_TOKEN = os.environ['CHARON_API_TOKEN']\n CHARON_BASE_URL = os.environ['CHARON_BASE_URL']\n # Remove trailing slashes\n m = re.match(r'(?P.*\\w+)/*', CHARON_BASE_URL)\n if m:\n CHARON_BASE_URL = m.groups()[0]\nexcept KeyError as e:\n raise ValueError(\"Could not get required environmental variable \"\n \"\\\"{}\\\"; cannot connect to database.\".format(e))\n\n\n## TODO Might be better just to instantiate this when loading the module. Do we neeed a new instance every time? I don't think so\nclass CharonSession(requests.Session):\n def __init__(self, api_token=None, base_url=None):\n super(CharonSession, self).__init__()\n\n self._api_token = api_token or CHARON_API_TOKEN\n self._api_token_dict = {'X-Charon-API-token': self._api_token}\n self._base_url = base_url or CHARON_BASE_URL\n\n #def get(url_args, *args, **kwargs):\n # url = self.construct_charon_url(url_args)\n # return validate_response(super(CharonSession, self).get(url,\n # headers=self._api_token_dict,\n # *args, **kwargs))\n\n self.get = validate_response(functools.partial(self.get,\n headers=self._api_token_dict, timeout=3))\n self.post = validate_response(functools.partial(self.post,\n headers=self._api_token_dict, timeout=3))\n self.put = validate_response(functools.partial(self.put,\n headers=self._api_token_dict, timeout=3))\n self.delete = validate_response(functools.partial(self.delete,\n headers=self._api_token_dict, timeout=3))\n\n self._project_params = (\"projectid\", \"name\", \"status\", \"pipeline\", \"bpa\")\n self._sample_params = (\"sampleid\", \"status\", \"received\", \"qc_status\",\n \"genotyping_status\", \"genotyping_concordance\",\n \"lims_initial_qc\", 
\"total_autosomal_coverage\")\n self._libprep_params = (\"libprepid\", \"limsid\", \"status\")\n self._seqrun_params = ('seqrunid', 'sequencing_status', 'alignment_status',\n 'runid', 'seq_qc_flag', 'demux_qc_flag',\n 'mean_coverage', 'std_coverage', 'GC_percentage',\n 'aligned_bases', 'mapped_bases', 'mapped_reads',\n 'total_reads', 'sequenced_bases', 'windows', 'bam_file',\n 'output_file', 'mean_mapping_quality', 'bases_number',\n 'contigs_number', 'mean_autosomal_coverage', 'lanes',\n 'alignment_coverage', 'reads_per_lane')\n self._seqrun_reset_params = tuple(set(self._seqrun_params) - \\\n set(['demux_qc_flag', 'lanes', 'windows', 'seq_qc_flag',\n 'alignment_coverage', 'alignment_status',\n 'sequencing_status', 'total_reads', 'runid', 'seqrunid']))\n\n\n ## Another option is to build this into the get/post/put/delete requests\n ## --> Do we ever need to call this (or those) separately?\n @memoized\n def construct_charon_url(self, *args):\n \"\"\"Build a Charon URL, appending any *args passed.\"\"\"\n return \"{}/api/v1/{}\".format(self._base_url,'/'.join([str(a) for a in args]))\n\n\n ## FIXME There's a lot of repeat code here that might could be condensed\n\n # Project\n def project_create(self, projectid, name=None, status=None, pipeline=None, bpa=None):\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._project_params }\n return self.post(self.construct_charon_url('project'),\n data=json.dumps(data)).json()\n\n def project_get(self, projectid):\n return self.get(self.construct_charon_url('project', projectid)).json()\n\n\n def project_get_samples(self, projectid):\n return self.get(self.construct_charon_url('samples', projectid)).json()\n \n def project_update(self, projectid, name=None, status=None, pipeline=None, bpa=None):\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._project_params if l_dict.get(k)}\n return self.put(self.construct_charon_url('project', projectid),\n data=json.dumps(data)).text\n\n def 
projects_get_all(self):\n return self.get(self.construct_charon_url('projects')).json()\n\n def project_delete(self, projectid):\n return self.delete(self.construct_charon_url('project', projectid)).text\n\n # Sample\n def sample_create(self, projectid, sampleid, status=None, received=None,\n qc_status=None, genotyping_status=None,\n genotyping_concordance=None, lims_initial_qc=None,\n total_autosomal_coverage=None):\n url = self.construct_charon_url(\"sample\", projectid)\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._sample_params }\n return self.post(url, json.dumps(data)).json()\n\n def sample_get(self, projectid, sampleid):\n url = self.construct_charon_url(\"sample\", projectid, sampleid)\n return self.get(url).json()\n\n def sample_get_libpreps(self, projectid, sampleid):\n return self.get(self.construct_charon_url('libpreps', projectid, sampleid)).json()\n\n def sample_update(self, projectid, sampleid, status=None, received=None,\n qc_status=None, genotyping_status=None,\n genotyping_concordance=None, lims_initial_qc=None,\n total_autosomal_coverage=None):\n url = self.construct_charon_url(\"sample\", projectid, sampleid)\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._sample_params if l_dict.get(k)}\n return self.put(url, json.dumps(data)).text\n\n ## Eliminate?\n def samples_get_all(self, projectid):\n return self.project_get_samples(projectid)\n #return self.get(self.construct_charon_url('samples', projectid)).json()\n\n # LibPrep\n def libprep_create(self, projectid, sampleid, libprepid, status=None, limsid=None):\n url = self.construct_charon_url(\"libprep\", projectid, sampleid)\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._libprep_params }\n return self.post(url, json.dumps(data)).json()\n\n def libprep_get(self, projectid, sampleid, libprepid):\n url = self.construct_charon_url(\"libprep\", projectid, sampleid, libprepid)\n return self.get(url).json()\n\n def libprep_get_seqruns(self, projectid, 
sampleid, libprepid):\n return self.get(self.construct_charon_url('seqruns', projectid, sampleid, libprepid)).json()\n\n\n def libprep_update(self, projectid, sampleid, libprepid, status=None, limsid=None):\n url = self.construct_charon_url(\"libprep\", projectid, sampleid, libprepid)\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._libprep_params if l_dict.get(k)}\n return self.put(url, json.dumps(data)).text\n\n ## Eliminate?\n def libpreps_get_all(self, projectid, sampleid):\n return self.sample_get_libpreps(projectid, sampleid)\n #return self.get(self.construct_charon_url('libpreps', projectid, sampleid)).json()\n\n # SeqRun\n def seqrun_create(self, projectid, sampleid, libprepid, seqrunid,\n total_reads, mean_autosomal_coverage, reads_per_lane=None,\n sequencing_status=None, alignment_status=None, runid=None,\n seq_qc_flag=None, demux_qc_flag=None, mean_coverage=None,\n std_coverage=None, GC_percentage=None, aligned_bases=None,\n mapped_bases=None, mapped_reads=None,\n sequenced_bases=None, windows=None, bam_file=None,\n output_file=None, mean_mapping_quality=None,\n bases_number=None, contigs_number=None,\n lanes=None,\n alignment_coverage=None):\n url = self.construct_charon_url(\"seqrun\", projectid, sampleid, libprepid)\n l_dict = locals()\n data = { k: l_dict.get(k) for k in self._seqrun_params }\n return self.post(url, json.dumps(data)).json()\n\n def seqrun_get(self, projectid, sampleid, libprepid, seqrunid):\n url = self.construct_charon_url(\"seqrun\", projectid, sampleid, libprepid, seqrunid)\n return self.get(url).json()\n\n def seqrun_update(self, projectid, sampleid, libprepid, seqrunid,\n total_reads=None, mean_autosomal_coverage=None, reads_per_lane=None,\n sequencing_status=None, alignment_status=None, runid=None,\n seq_qc_flag=None, demux_qc_flag=None, mean_coverage=None,\n std_coverage=None, GC_percentage=None, aligned_bases=None,\n mapped_bases=None, mapped_reads=None,\n sequenced_bases=None, windows=None, bam_file=None,\n 
output_file=None, mean_mapping_quality=None,\n bases_number=None, contigs_number=None,\n lanes=None, alignment_coverage=None,\n *args, **kwargs):\n ## TODO Consider implementing for allathese functions\n if args: LOG.debug(\"Ignoring extra args: {}\".format(\", \".join(*args)))\n if kwargs: LOG.debug(\"Ignoring extra kwargs: {}\".format(\", \".join([\"{}: {}\".format(k,v) for k,v in kwargs.iteritems()])))\n url = self.construct_charon_url(\"seqrun\", projectid, sampleid, libprepid, seqrunid)\n l_dict = locals()\n data = { k: str(l_dict.get(k)) for k in self._seqrun_params if l_dict.get(k)}\n return self.put(url, json.dumps(data)).text\n\n def seqrun_reset(self, projectid, sampleid, libprepid, seqrunid):\n url = self.construct_charon_url(\"seqrun\", projectid, sampleid, libprepid, seqrunid)\n data = { k: None for k in self._seqrun_reset_params}\n return self.put(url, json.dumps(data)).text\n\n\n def seqruns_get_all(self, projectid, sampleid, libprepid):\n return self.libprep_get_seqruns(projectid, sampleid, libprepid)\n #return self.get(self.construct_charon_url('seqruns', projectid, sampleid, libprepid)).json()\n\n\n## TODO create different CharonError subclasses for different codes (e.g. 400, 404)\nclass CharonError(Exception):\n def __init__(self, message, status_code=None, *args, **kwargs):\n self.status_code = status_code\n super(CharonError, self).__init__(message, *args, **kwargs)\n\n\nclass validate_response(object):\n \"\"\"\n Validate or raise an appropriate exception for a Charon API query.\n \"\"\"\n def __init__(self, f):\n self.f = f\n ## Should these be class attributes? 
I don't really know\n self.SUCCESS_CODES = (200, 201, 204)\n # There are certainly more failure codes I need to add here\n self.FAILURE_CODES = {\n 400: (CharonError, (\"Charon access failure: invalid input \"\n \"data (reason '{response.reason}' / \"\n \"code {response.status_code} / \"\n \"url '{response.url}')\")),\n 404: (CharonError, (\"Charon access failure: not found \"\n \"in database (reason '{response.reason}' / \"\n \"code {response.status_code} / \"\n \"url '{response.url}')\")), # when else can we get this? malformed URL?\n 405: (CharonError, (\"Charon access failure: method not \"\n \"allowed (reason '{response.reason}' / \"\n \"code {response.status_code} / \"\n \"url '{response.url}')\")),\n 409: (CharonError, (\"Charon access failure: document \"\n \"revision conflict (reason '{response.reason}' / \"\n \"code {response.status_code} / \"\n \"url '{response.url}')\")),}\n\n def __call__(self, *args, **kwargs):\n response = self.f(*args, **kwargs)\n if response.status_code not in self.SUCCESS_CODES:\n try:\n err_type, err_msg = self.FAILURE_CODES[response.status_code]\n except KeyError:\n # Error code undefined, used generic text\n err_type = CharonError\n err_msg = (\"Charon access failure: {response.reason} \"\n \"(code {response.status_code} / url '{response.url}')\")\n raise err_type(err_msg.format(**locals()), response.status_code)\n return response\n","sub_path":"ngi_pipeline/database/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":13033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"649069031","text":"def ready_arguments(fname_or_dict, highRes):\n import numpy as np\n import pickle\n import chumpy as ch\n from chumpy.ch import MatVecMult\n from dataset.smpl_layer.posemapper import posemap\n import scipy.sparse as sp\n\n if not isinstance(fname_or_dict, dict):\n dd = pickle.load(open(fname_or_dict, 'rb'), encoding='latin1')\n # dd = pickle.load(open(fname_or_dict, 'rb'))\n else:\n dd = fname_or_dict\n\n want_shapemodel = 'shapedirs' in dd\n nposeparms = dd['kintree_table'].shape[1] * 3\n\n if 'trans' not in dd:\n dd['trans'] = np.zeros(3)\n if 'pose' not in dd:\n dd['pose'] = np.zeros(nposeparms)\n if 'shapedirs' in dd and 'betas' not in dd:\n dd['betas'] = np.zeros(dd['shapedirs'].shape[-1])\n\n for s in ['v_template', 'weights', 'posedirs', 'pose', 'trans', 'shapedirs', 'betas', 'J']:\n if (s in dd) and isinstance(dd[s], ch.ch.Ch):\n dd[s] = dd[s].r\n\n if want_shapemodel:\n dd['v_shaped'] = dd['shapedirs'].dot(dd['betas']) + dd['v_template']\n v_shaped = dd['v_shaped']\n J_tmpx = MatVecMult(dd['J_regressor'], v_shaped[:, 0])\n J_tmpy = MatVecMult(dd['J_regressor'], v_shaped[:, 1])\n J_tmpz = MatVecMult(dd['J_regressor'], v_shaped[:, 2])\n dd['J'] = ch.vstack((J_tmpx, J_tmpy, J_tmpz)).T\n dd['v_posed'] = v_shaped + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))\n else:\n dd['v_posed'] = dd['v_template'] + dd['posedirs'].dot(posemap(dd['bs_type'])(dd['pose']))\n\n if highRes is not None:\n with open(highRes, 'rb') as f:\n mapping, hf = pickle.load(f, encoding='latin1')\n num_betas = dd['shapedirs'].shape[-1]\n hv = mapping.dot(dd['v_template'].ravel()).reshape(-1, 3)\n J_reg = dd['J_regressor'].asformat('csr')\n dd['f'] = hf\n dd['v_template'] = hv\n dd['weights'] = np.hstack([\n np.expand_dims(\n np.mean(\n mapping.dot(np.repeat(np.expand_dims(dd['weights'][:, i], -1), 3)).reshape(-1, 3)\n , axis=1),\n axis=-1)\n for i in range(24)\n ])\n dd['posedirs'] = mapping.dot(dd['posedirs'].reshape((-1, 207))).reshape(-1, 3, 207)\n 
dd['shapedirs'] = mapping.dot(dd['shapedirs'].reshape((-1, num_betas))).reshape(-1, 3, num_betas)\n dd['J_regressor'] = sp.csr_matrix((J_reg.data, J_reg.indices, J_reg.indptr), shape=(24, hv.shape[0]))\n\n return dd\n","sub_path":"dataset/smpl_layer/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"111271814","text":"# sum of digit\n\nnumber = int(input('enter a digit'))\ntotal = 0\nwhile number>0:\n r = number % 10 # get the last digit\n total += r # add the digit to total\n number = number // 10 # remove the last digit from number\n\nprint('total',total)","sub_path":"basics/while_ex2.py","file_name":"while_ex2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"65495968","text":"# coding=utf-8\nimport codecs\nfrom collections import defaultdict\nimport fileinput\n\nimport os\nimport re\n\nVERNUM_PATTERN = r'REPGA_v(\\d+\\.){3}\\d'\nSCHEMA_NAME_PATTERN = r'\\w+_\\[MIS\\w*\\]'\nRUN_ALL_FNAME_PATTERN = r'run_all_\\d+_' + VERNUM_PATTERN + '_' + SCHEMA_NAME_PATTERN + '\\.sql'\n\n\ndef check_folder_name(value):\n \"\"\" Patch verzioszamamnak ellenorzese \"\"\"\n errors = []\n if not re.match('^' + VERNUM_PATTERN + '$', value):\n errors.append('Érvénytelen verziószám.')\n\n return errors\n\n\ndef check_main_dir(install_desc_fname):\n \"\"\" A patch fokonyvtaranak ellenorzese. \"\"\"\n\n if install_desc_fname not in os.listdir(os.curdir):\n raise IOError('Nem található %s nevű telepítési leírás a főkönyvtárban.' % install_desc_fname)\n\n return parse_install_file(install_desc_fname), __find_run_alls_in_desc(\n install_desc_fname)\n\n\ndef parse_install_file(install_desc_fname):\n \"\"\" Az installaciot leiro fajl ellenorzese \"\"\"\n\n keywords = __get_keywords()\n with codecs.open(install_desc_fname, encoding='cp1252') as descfile:\n for descline in descfile:\n for keyword in keywords:\n if keyword in descline:\n keywords.remove(keyword)\n\n errors = ['Nincs megadva a(z) %s mező' % kw for kw in keywords]\n\n return errors\n\n\ndef __find_run_alls_in_desc(install_desc_fname):\n run_alls = []\n with open(install_desc_fname) as descfile:\n for line in descfile:\n run_all_match = re.search(RUN_ALL_FNAME_PATTERN, line)\n if run_all_match:\n run_alls.append(run_all_match.group())\n\n return set(run_alls)\n\n\ndef __get_keywords():\n ret = []\n try:\n with codecs.open(os.path.join(os.path.dirname(__file__),\n '../../conf/temp_install.txt'),\n encoding='cp1252') as kwfile:\n for keyword in kwfile:\n ret.append(keyword.strip())\n except IOError:\n raise IOError('Hiba a conf/temp_install.txt fájl megnyitása közben.')\n\n return ret\n\n\ndef check_docs_dir(ver):\n \"\"\" A dokumentumok konyvtar ellenorzese. 
\"\"\"\n docs_dirname = '1_Dokumentumok'\n if docs_dirname not in os.listdir(os.curdir):\n return ['A patchben nincs Dokumentumok könyvtár.']\n\n pattern = '^FAT_' + ver + '.xlsx?$'\n for fn in os.listdir(os.path.join(os.curdir, docs_dirname)):\n if re.match(pattern, fn):\n return []\n return ['Hiányzó vagy hibás nevű FAT állomány.']\n\n\n\ndef check_db_dir(described_run_alls):\n \"\"\" Az adatbazis konyvtar fajljainak ellenorzese. \"\"\"\n errors = []\n db_dirname = 'Adatbazis'\n if db_dirname not in os.listdir(os.curdir):\n raise IOError('Nem található a patch Adatbazis mappája.')\n\n run_alls_in_db_dir = set([fname for fname in os.listdir(db_dirname) if\n re.match('^' + RUN_ALL_FNAME_PATTERN + '$',\n fname)])\n\n not_existing_run_alls = described_run_alls - run_alls_in_db_dir\n undescribed_run_alls = run_alls_in_db_dir - described_run_alls\n errors.extend(\n ['A telepítési fájlban megadott %s fájl nem létezik' % ra for ra in\n not_existing_run_alls])\n errors.extend(\n ['A(z) %s fájl nem található a telepítése leírásban' % ra for ra in\n undescribed_run_alls])\n\n schemas = [re.search('(?<=_)' + SCHEMA_NAME_PATTERN, fn).group() for fn in\n run_alls_in_db_dir]\n\n run_all_file_errors, scripts = check_run_all_files(\n [os.path.join('Adatbazis', ra) for ra in run_alls_in_db_dir])\n return errors, run_all_file_errors, schemas, scripts\n\n\ndef check_run_all_files(run_alls):\n \"\"\" A runall file-ok adatainak beolvasasa. 
\"\"\"\n errors = []\n scripts = defaultdict(list)\n for line in fileinput.input(run_alls):\n line = line.strip()\n included_script = re.match(\n '@(' + SCHEMA_NAME_PATTERN + r'([/|\\\\]\\w+)+\\.sql)', line)\n fname = os.path.basename(fileinput.filename())\n if included_script:\n scripts[included_script.group(1).replace('\\\\', '/')].append(fname)\n\n if line.startswith('SPOOL ./_Install_Logs/'):\n log_file_match = re.match(\n r'SPOOL \\./_Install_Logs/([\\w\\.\\[\\]]*)(?=_&)', line)\n if not log_file_match or log_file_match.group(1) + '.sql' != fname:\n errors.append('Nem egyezik a log neve a %s fájlban.' % fname)\n\n return errors, scripts\n\n\ndef check_db_folder_dirs(dirs_in_db_folder):\n \"\"\" Az Adatbazis konyvtar konyvtarainak ellenorzese \"\"\"\n if not dirs_in_db_folder:\n return ['Az Adatbázis könyvtár nem tartalmaz könyvtárakat.']\n\n if '_Install_Logs' not in dirs_in_db_folder:\n return ['Az Adatbázis konyvtarban nincs _Install_Logs nevű konyvtár']\n\n return []\n\n\ndef check_schema_dirs(schemas, dirs_in_db_folder):\n errors = []\n existing_schema_folders = set(schemas).intersection(dirs_in_db_folder)\n errors.extend(['A(z) %s sémához nem található mappa.' % schema for schema in\n set(schemas) - existing_schema_folders])\n errors.extend(['A(z) %s séma mappája üres.' 
% schema for schema in\n existing_schema_folders if\n not os.listdir(os.path.join('Adatbazis', schema))])\n\n return errors\n\n\ndef check_script_dirs(dirs_in_db_folders, scripts_in_run_alls):\n \"\"\" A semakonyvtarak ellenorzese \"\"\"\n ilog_folder_name = '_Install_Logs'\n if ilog_folder_name in dirs_in_db_folders:\n dirs_in_db_folders.remove('_Install_Logs')\n\n sql_files = []\n\n for schema_dir in dirs_in_db_folders:\n sql_files_dir = os.path.join('Adatbazis', schema_dir)\n for dirpath, dirnames, fnames in os.walk(sql_files_dir):\n sql_files.extend(\n [dirpath.replace('Adatbazis\\\\', '').replace('\\\\', '/') + '/' + fname for fname in fnames if\n fname.endswith('.sql')])\n\n errors = []\n existing_sql_files = set(sql_files)\n referenced_scripts_in_run_alls = set(scripts_in_run_alls.keys())\n\n errors.extend(\n ['A(z) %s nevű script egy run_all fájlban sem szerepel.' % sql for sql\n in existing_sql_files - referenced_scripts_in_run_alls])\n\n not_existing_scripts = referenced_scripts_in_run_alls - existing_sql_files\n errors.extend([\n 'A(z) %s nevű script benne van a %s fájl(ok)ban, de nem létezik.' % (\n script, ','.join(scripts_in_run_alls[script])) for script in\n not_existing_scripts])\n\n return errors\n\n\ndef check_install_logs_folder():\n ilog_folder_path = 'Adatbazis/_Install_Logs'\n if not os.path.isdir(ilog_folder_path):\n return ['A patch nem tartalmaz %s mappát.' % ilog_folder_path]\n logs = os.listdir(ilog_folder_path)\n if not logs:\n return ['A patch %s mappája üres.' % ilog_folder_path]\n return ['A(z) %s install log fájl kiterjesztése érvénytelen.' % fname for fname\n in logs if not fname.endswith('.log')]\n\n\ndef find_errors():\n \"\"\" A hibak ellenorzeset vegzo fuggvenyek meghivasa \"\"\"\n\n # TODO eliminate the repeated errors.extend(...) 
calls -> something similar to a list literal\n vernum = os.path.basename(os.getcwd())\n errors = []\n errors.extend(check_folder_name(vernum))\n\n install_desc_fname = 'Telepitesi_leiras_' + vernum + \".txt\"\n main_dir_errors, run_alls = check_main_dir(install_desc_fname)\n errors.extend(main_dir_errors)\n\n errors.extend(check_docs_dir(vernum))\n\n db_dir_errors, run_all_errors, schemas, scripts = check_db_dir(set(run_alls))\n errors.extend(db_dir_errors)\n errors.extend(run_all_errors)\n\n dirs_in_db_folder = [os.path.basename(dirname) for dirname in\n os.listdir('Adatbazis') if\n os.path.isdir(os.path.join('Adatbazis', dirname))]\n errors.extend(check_db_folder_dirs(dirs_in_db_folder))\n\n errors.extend(check_schema_dirs(schemas, dirs_in_db_folder))\n\n errors.extend(check_script_dirs(dirs_in_db_folder, scripts))\n\n errors.extend(check_install_logs_folder())\n\n return errors\n\ndef run_old_checks():\n try:\n return find_errors()\n except IOError as detail:\n raise ValueError(detail)\n\n\n","sub_path":"pyv/pyv/engine/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"26198796","text":"import xlrd\nimport xlwt\nimport json\n\nmainObj = {}\nheadLines = []\noutputRows = []\nloc = \"/Users/joel/Documents/DataFiles/HistoryOMX.xls\"\noutLoc = \"/Users/joel/Documents/DataFiles/HistoryOMXformatted.xlsx\"\n\nwb = xlrd.open_workbook(loc)\nsheet = wb.sheet_by_index(0)\nprint(sheet)\nnumCols = sheet.ncols\nnRows = sheet.nrows\n\n\ndef looper():\n rowList = []\n for i in range(nRows):\n if i != 0:\n for j in range(len(sheet.row(i))-4):\n rowList.append(sheet.cell_value(i, j))\n recAdd(rowList, mainObj)\n rowList = []\n else:\n for j in range(len(sheet.row(i))-1):\n headLines.append(sheet.cell_value(i, j))\n mainObj['HeadLines'] = headLines\n\n\ndef recAdd(data, obj):\n if len(data) > 0:\n if data[0] in obj:\n sendObj = obj[data[0]]\n data.pop(0)\n recAdd(data, sendObj)\n else:\n obj[data[0]] = {}\n sendObj = obj[data[0]]\n data.pop(0)\n recAdd(data, sendObj)\n \n return\n\ndef prepForSum(myDict):\n del myDict[\"HeadLines\"]\n myDict = sumLastRow(myDict, [])\n myDict[\"HeadLines\"] = headLines\n return myDict\n\n\ndef sumLastRow(myDict, keys):\n if(len(myDict) != 0):\n for key in myDict.keys():\n keys.append(key)\n \n if(len(myDict[key]) == 0):\n myDict = sum(myDict.keys())/len(myDict.keys())\n return myDict\n else:\n myDict[key] = sumLastRow(myDict[key], keys)\n return myDict\n\ndef concatRowVector(obj, values):\n for key in obj.keys():\n myValues = [] + values\n if(key != \"HeadLines\"):\n if type(obj[key]) is dict:\n myValues.append(key)\n concatRowVector(obj[key], myValues)\n else:\n valVec = myValues + [key, obj[key]]\n outputRows.append(valVec)\n\ndef writeToExcel():\n outWb = xlwt.Workbook(encoding = 'ascii')\n worksheet = outWb.add_sheet('Sheet')\n for i in range(len(outputRows)):\n for j in range(len(outputRows[i])):\n worksheet.write(i, j, label = outputRows[i][j])\n outWb.save(outLoc)\n\n\n\n#Calc length of the lowest tier. 
May not be used\ndef calcLength(obj):\n for key in obj.keys():\n if(key != \"HeadLines\"):\n if type(obj[key]) is dict:\n length = calcLength(obj[key])\n else:\n length = len(obj)\n return length\n\n\nlooper()\nmaiObj = prepForSum(mainObj)\n\nconcatRowVector(mainObj, [])\n\nif True:\n headLines = ['Year', 'Month', 'Cost']\n mainObj[\"HeadLines\"] = headLines\n outputRows.insert(0, headLines)\n\nwriteToExcel()\nprint(\"Finish\")","sub_path":"generalExcelFormatter.py","file_name":"generalExcelFormatter.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"488029123","text":"import numpy as np\nimport scipy.io as scio\nimport matplotlib.pyplot as plt\nimport torch.utils.data.dataset\n\n\nclass matSet(torch.utils.data.dataset.Dataset):\n def __init__(self, path, mode):\n super(matSet, self).__init__()\n self.mat = scio.loadmat(path)[mode]\n self.mode = mode\n\n\n def __getitem__(self, item):\n return torch.tensor(self.mat[item], dtype=torch.float)\n\n def __len__(self):\n return self.mat.shape[0]\n\n def show_plt(self):\n plt.scatter(self.mat.T[0], self.mat.T[1], label=self.mode, alpha=0.2, c='r')\n\n\nif __name__ == '__main__':\n pointsA = matSet('./points.mat', 'a')\n pointsB = matSet('./points.mat', 'b')\n pointsC = matSet('./points.mat', 'c')\n pointsD = matSet('./points.mat', 'd')\n pointsXX = matSet('./points.mat', 'xx')\n # xx 8192\n #\n # print(points.mat.shape)\n # points.mat = points.mat.T\n # print(points.mat.shape)\n # plt.plot(points.mat[0], points.mat[1], '.')\n # for x in range(len(points)):\n # plt.scatter(points[x][0], points[x][1])\n # pointsA.show_plt()\n # pointsB.show_plt()\n # pointsC.show_plt()\n # pointsD.show_plt()\n pointsXX.show_plt()\n plt.legend()\n plt.show()\n\n","sub_path":"lab5/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"232526589","text":"from selenium import webdriver\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef test_chrome_manager_with_selenium():\n driver_path = ChromeDriverManager().install()\n driver = webdriver.Chrome(driver_path)\n driver.get(\"http://automation-remarks.com\")\n driver.close()\n","sub_path":"tests_xdist/test_cuncurent_2.py","file_name":"test_cuncurent_2.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"654479646","text":"# coding: utf-8\n\n'''\n分别用 Lax-Friedrichs, Lax-Wendroff, Roe, ENO 求解一维激波管问题.\n\n方程(下标表示偏导数):\n (rho, rho*u, E)_t + (rho*u, rho*u**2 + p, u*(E + p))_x = 0\n初始条件:\n (rho, u, p) = x<=0.5: (0.5, 0, 0.571); x>0.5: (0.445, 0.698, 3.528)\n\n参考教材: 计算流体力学基础及其应用,John D. Anderson(中文版)\n'''\n\nfrom __future__ import division\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\n\ndef Animation(x=None, u=None, nt=0):\n '''\n :param x:\n :param u: 所有时间步的数值解,是一个二维矩阵,矩阵每一行表示某个时间步的数值解\n :param nt:\n :return:\n '''\n # 图像所在区域的大小\n xmin, xmax = -0.01, 1.01\n ymin, ymax = -1, 2\n # xmin, xmax = 0, 1\n # ymin, ymax = 0.22, 0.72\n\n fig = plt.figure()\n ax = plt.axes(xlim=(xmin, xmax), ylim=(ymin, ymax))\n ln, = ax.plot([], [], lw=2, animated=True)\n\n def init():\n ln.set_data([], [])\n return ln,\n\n def update(frame): # 这里参数表示帧数frame\n un = u[frame, :]\n # print(un)\n ln.set_data(x, un)\n return ln,\n\n ani = animation.FuncAnimation(fig, update, frames=nt, init_func=init, blit=True, interval=200)\n ani.save('/home/fan/Lax_Wendroff_pressure.mpeg', writer=\"ffmpeg\")\n\n plt.title(\"Lax_Wendroff_pressure\")\n plt.show()\n\n\ndef Lax_Friedrichs(rho=0, rho_u=0, E=0, dx=0, dt=0, nx=0, nt=0, animate=1):\n '''\n 利用Lax-Friedrichs格式求解,守恒形式的差分格式为: \\partial F / \\partial t + \\partial G / \\partial x = 0\n animate: 1, 2, 3 分别画出 rho, u, p 的动图\n '''\n if dt > dx:\n raise ValueError(\"The scheme is not stable!!!\")\n def conserv2origin(rho=0, rho_u=0, E=0):\n '''\n 把守恒变量 (rho, rho*u, E) 转化为原始变量 (rho, u, p).\n 重点计算 E,见参考教材p58,p62\n '''\n u = rho_u / rho\n gamma = 1.4 # 对于空气,是个常数\n p = (gamma - 1) * (E - 0.5 * rho * u**2)\n return (rho, u, p)\n\n rho, u, p = conserv2origin(rho=rho, rho_u=rho_u, E=E) # 将初始的守恒变量转换成初始的原始变量\n\n # 保存所有时间部的计算结果,可以用来画动画\n rho_n = np.zeros([nt, nx])\n rho_u_n = np.zeros([nt, nx])\n E_n = np.zeros([nt, nx])\n\n # 初值\n rho_n[0, :] = rho\n rho_u_n[0, :] = rho_u\n E_n[0, :] = E\n\n # 假定边界条件如下\n rho_n[:, 
0] = 0.5\n rho_n[:, -1] = 0.445\n rho_u_n[:, 0] = rho_u[0]\n rho_u_n[:, -1] = rho_u[-1]\n E_n[:, 0] = E[0]\n E_n[:, -1] = E[-1]\n\n for t in range(1, nt):\n for i in range(1, nx-1):\n rho_n[t, i] = 0.5 * (rho_n[t-1, i+1] + rho_n[t-1, i-1]) \\\n - dt * (rho[i+1]*u[i+1] - rho[i-1]*u[i-1]) / (2 * dx)\n\n rho_u_n[t, i] = 0.5 * (rho_u_n[t-1, i+1] + rho_u_n[t-1, i-1]) \\\n - dt * ((rho[i+1]*u[i+1]**2 + p[i+1]) - (rho[i-1]*u[i-1]**2 + p[i-1])) / (2 * dx)\n\n E_n[t, i] = 0.5 * (E_n[t-1, i+1] + E_n[t-1, i-1]) \\\n - dt * (u[i+1]*(E_n[t-1, i+1] + p[i+1]) - u[i-1]*(E_n[t-1, i-1] + p[i-1])) / (2 * dx)\n\n # 更新当前步的值\n rho, u, p = conserv2origin(rho=rho_n[t, :], rho_u=rho_u_n[t, :], E=E_n[t, :])\n\n # 把得到的所有时间步步的守恒变量的值转换为原始变量的值\n rho_n, u_n, p_n = conserv2origin(rho=rho_n, rho_u=rho_u_n, E=E_n)\n\n if animate == 1:\n Animation(x=np.linspace(0, 1, nx), u=rho_n, nt=nt)\n elif animate == 2:\n Animation(x=np.linspace(0, 1, nx), u=u_n, nt=nt)\n elif animate == 3:\n Animation(x=np.linspace(0, 1, nx), u=p_n, nt=nt)\n else: raise ValueError(\"No matches!\")\n\n\ndef Lax_Wendroff(rho=0, rho_u=0, E=0, dx=0, dt=0, nx=0, nt=0, animate=1):\n '''\n 利用Lax-Wendroff格式求解,守恒形式的差分格式为: \\partial F / \\partial t + \\partial G / \\partial x = 0\n animate: 1, 2, 3 分别画出 rho, u, p 的动图\n '''\n if dt > dx:\n raise ValueError(\"The scheme is not stable!!!\")\n def conserv2origin(rho=0, rho_u=0, E=0):\n '''\n 把守恒变量 (rho, rho*u, E) 转化为原始变量 (rho, u, p).\n 重点计算 E,见参考教材p58,p62\n '''\n u = rho_u / rho\n gamma = 1.4 # 对于空气,是个常数\n p = (gamma - 1) * (E - 0.5 * rho * u**2)\n return (rho, u, p)\n\n rho, u, p = conserv2origin(rho=rho, rho_u=rho_u, E=E) # 将初始的守恒变量转换成初始的原始变量\n\n # 保存所有时间步的计算结果,可以用来画动画\n rho_n = np.zeros([nt, nx])\n rho_u_n = np.zeros([nt, nx])\n E_n = np.zeros([nt, nx])\n\n # 初值\n rho_n[0, :] = rho\n rho_u_n[0, :] = rho_u\n E_n[0, :] = E\n\n # 假定任意时刻的边界条件如下\n rho_n[:, 0] = 0.5\n rho_n[:, -1] = 0.445\n rho_u_n[:, 0] = rho_u[0]\n rho_u_n[:, -1] = rho_u[-1]\n E_n[:, 0] = E[0]\n E_n[:, -1] = E[-1]\n\n for t 
in range(1, nt):\n for i in range(1, nx-1):\n rho_n[t, i] = rho_n[t-1, i] \\\n - (dt/dx) * (rho[i+1]*u[i+1] - rho[i-1]*u[i-1]) / 2 \\\n + (dt/dx)**2 * (rho[i+1]*u[i+1] - 2*rho[i]*u[i] + rho[i-1]*u[i-1]) / 2\n\n rho_u_n[t, i] = rho_u_n[t-1, i] \\\n - (dt/dx) * ((rho[i+1]*u[i+1]**2 + p[i+1]) - (rho[i-1]*u[i-1]**2 + p[i-1])) / 2 \\\n + (dt/dx)**2 * ((rho[i+1]*u[i+1]**2 + p[i+1]) - 2*(rho[i]*u[i]**2 + p[i]) + (rho[i-1]*u[i-1]**2 + p[i-1])) / 2\n\n E_n[t, i] = E_n[t-1, i] \\\n - (dt/dx) * (u[i+1]*(E_n[t-1, i+1] + p[i+1]) - u[i-1]*(E_n[t-1, i-1] + p[i-1])) / 2 \\\n + (dt/dx)**2 * (u[i+1] * (E_n[t-1, i+1] + p[i+1]) - 2*u[i]*(E_n[t-1, i] + p[i]) + u[i-1]*(E_n[t-1, i-1] + p[i-1])) / 2\n\n # 更新当前步的值\n rho, u, p = conserv2origin(rho=rho_n[t, :], rho_u=rho_u_n[t, :], E=E_n[t, :])\n\n rho_n, u_n, p_n = conserv2origin(rho=rho_n, rho_u=rho_u_n, E=E_n)\n\n if animate == 1:\n Animation(x=np.linspace(0, 1, nx), u=rho_n, nt=nt)\n elif animate == 2:\n Animation(x=np.linspace(0, 1, nx), u=u_n, nt=nt)\n elif animate == 3:\n Animation(x=np.linspace(0, 1, nx), u=p_n, nt=nt)\n else: raise ValueError(\"No matches!\")\n\n\ndef Roe():\n\n pass\ndef ENO():\n\n pass\n\n\nif __name__ == '__main__':\n nx = 101 # 区间[0, 1]的节点个数\n dx = 1 / (nx - 1) # 空间步长\n nt = 30 # 模拟多少个时间步\n dt = 0.0005 # 时间步长\n\n # 密度rho的初值\n rho = np.zeros(nx)\n rho[0: int(0.5 / dx + 1)] = 0.5\n # rho[int(0.5 / dx + 1): int(1 / dx + 1)] = 0.445\n rho[int(0.5 / dx + 1):] = 0.445\n\n # 速度u的初值\n u = np.zeros(nx)\n u[int(0.5/dx + 1):] = 0.698\n\n # 能量E的初值\n E = np.zeros(nx)\n E[0: int(0.5 / dx + 1)] = 0.571\n # E[int(0.5 / dx + 1): int(1 / dx + 1)] = 3.528\n E[int(0.5 / dx + 1):] = 3.528\n\n\n # ------------------------------------- Lax-Friedrichs 格式 -----------------------------------------------\n # Lax_Friedrichs(rho=rho, rho_u=rho*u, E=E, dx=dx, dt=dt, nx=nx, nt=nt, animate=3) # animate: 1, 2, 3 分别画出 rho, u, p 的动图\n\n\n # -------------------------------------- Lax-Wendroff 格式 -------------------------------------------------\n 
Lax_Wendroff(rho=rho, rho_u=rho*u, E=E, dx=dx, dt=dt, nx=nx, nt=nt, animate=3) # animate: 1, 2, 3 分别画出 rho, u, p 的动图\n\n\n # ------------------------------------------- Roe 格式 -----------------------------------------------------\n\n\n # ------------------------------------------- ENO 格式 -----------------------------------------------------\n","sub_path":"cfd_python/difference_schemes.py","file_name":"difference_schemes.py","file_ext":"py","file_size_in_byte":7742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"594097019","text":"import requests, csv\nimport numpy as np\nfrom bs4 import BeautifulSoup\nimport re, os, requests\nfrom url import url\nfrom file import file\nfrom multiprocessing import Pool\n\nmonth = 5\nyear = 2020\n\nif __name__=='__main__':\n\n # 测试file.py 文件功能\n path = \"F:\\H_img\"\n folder_name = str(year) + \"-\" + str(month)\n os.chdir(path)\n os.mkdir(folder_name)\n os.chdir(path + \"\\\\\" + folder_name)\n \n p = Pool(8)\n \n # 测试 url.py 文件功能\n total_info = url().get_info(year, month)\n print(len(total_info[1]))\n pre_url = total_info[4]\n ori_url = url().ori_url(pre_url)\n # print(ori_url)\n # 测试成功!\n\n print(total_info)\n \n \n rank = 1\n for url in ori_url:\n p.apply_async(file().download, args=(url, rank, total_info[1][rank-1]))\n rank += 1\n p.close()\n p.join()\n\n# headers = {\n# \"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36\",\n# \"referer\":\"https://www.pixiv.net/ranking.php\"\n# }\n\n# baseurl='https://www.pixiv.net/ranking.php?mode=monthly&content=illust&date=20200501'\n\n\n# html=requests.get(baseurl, headers=headers)\n# soup = BeautifulSoup(html.content, 'lxml')\n\n# all_infos = soup.find_all(class_=\"ranking-item\")\n\n# l_urls = []\n# l_ranks = []\n# l_pids = []\n# l_titles = []\n# l_authors = []\n\n# rank = 1\n# for info in all_infos:\n# url = info.find(\"img\").get('data-src')\n# pid = re.search('\\d{7,9}', url).group()\n# l_pids.append(pid)\n# l_urls.append(url)\n# l_titles.append(info.get(\"data-title\"))\n# l_authors.append(info.get(\"data-user-name\"))\n# l_ranks.append(rank)\n# rank += 1\n\n# total_info = np.transpose(np.array([l_ranks, l_titles, l_authors, l_pids,l_urls]))\n# print(total_info)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"171370998","text":"\"\"\"Tools to preprocess GPR lines before processing in third-party software such as ReflexW\"\"\"\n\nfrom os.path import isfile, join\nimport numpy as np\nimport pandas as pd\nimport math\nfrom collections import Counter\n\nfrom .gpr import MetaData, x_flip, empty_array\nfrom .mala import rad2dict, RD3, arr2rd3, Line\nfrom .filesystem import list_gpr_data, get_folder_and_filename\nfrom .calculations import ns2mm\n\n\nclass Coordinates(MetaData):\n \"\"\"Append coordinate data to a loaded collection of parallel segments. It may be sensible to put the loaded data through a grouping class to enable filtering of non-parallel segments.\"\"\"\n\n def __init__(self, segments, filename='segment_coordinates.csv', columns=[]):\n \"\"\"\n Attributes:\n dat |